from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return the longest non-decreasing subsequence of ``array``."""
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
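
# Usage sketch (added, not part of the original sample); the expected output
# below was traced by hand through the recursion.
if __name__ == "__main__":
    print(longest_subsequence([1, 3, 2, 4]))  # -> [1, 2, 4]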
"""Minimax on a perfect binary game tree of leaf scores."""
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the best reachable score for the player to move at this node."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    # A leaf has been reached: return its score.
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
"""simple docstring"""
from collections import defaultdict
class A__ :
'''simple docstring'''
def __init__( self: Optional[int] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Tuple) -> Any:
"""simple docstring"""
__lowerCAmelCase : Any = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
__lowerCAmelCase : Optional[Any] = [
[-1 for i in range(total + 1)] for j in range(2 ** len(_SCREAMING_SNAKE_CASE))
]
__lowerCAmelCase : List[str] = defaultdict(_SCREAMING_SNAKE_CASE) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
__lowerCAmelCase : Tuple = (1 << len(_SCREAMING_SNAKE_CASE)) - 1
def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Tuple) -> Any:
"""simple docstring"""
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
# Number of ways when we don't this task in the arrangement
__lowerCAmelCase : Dict = self.count_ways_until(_SCREAMING_SNAKE_CASE , task_no + 1)
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1)
# save the value.
__lowerCAmelCase : Optional[Any] = total_ways_util
return self.dp[mask][task_no]
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Tuple) -> List[str]:
"""simple docstring"""
for i in range(len(_SCREAMING_SNAKE_CASE)):
for j in task_performed[i]:
self.task[j].append(_SCREAMING_SNAKE_CASE)
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1)
if __name__ == "__main__":
__snake_case : Optional[int] = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
__snake_case : List[str] = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
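
# Expected output note (added): for the demo above the count works out to 10,
# hand-checked by enumerating all ways to pick distinct tasks t0 in {1, 3, 4},
# t1 in {1, 2, 5} and t2 in {3, 4} for the three persons.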
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def _lowercase ( __snake_case ) -> Optional[Any]:
__lowerCAmelCase : str = fname.split(os.path.sep )[-1]
return re.search(r"^(.*)_\d+\.jpg$" ,__snake_case ).groups()[0]
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Dict=None , _SCREAMING_SNAKE_CASE: Any=None) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : str = file_names
__lowerCAmelCase : Optional[int] = image_transform
__lowerCAmelCase : List[Any] = label_to_id
def __len__( self: Union[str, Any]) -> int:
"""simple docstring"""
return len(self.file_names)
def __getitem__( self: List[Any] , _SCREAMING_SNAKE_CASE: Optional[Any]) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : int = self.file_names[idx]
__lowerCAmelCase : List[str] = PIL.Image.open(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = raw_image.convert("RGB")
if self.image_transform is not None:
__lowerCAmelCase : Union[str, Any] = self.image_transform(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = extract_label(_SCREAMING_SNAKE_CASE)
if self.label_to_id is not None:
__lowerCAmelCase : str = self.label_to_id[label]
return {"image": image, "label": label}
def _lowercase ( __snake_case ,__snake_case ) -> Optional[int]:
# Initialize accelerator
if args.with_tracking:
__lowerCAmelCase : str = Accelerator(
cpu=args.cpu ,mixed_precision=args.mixed_precision ,log_with="all" ,project_dir=args.project_dir )
else:
__lowerCAmelCase : Any = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCAmelCase : int = config["lr"]
__lowerCAmelCase : Union[str, Any] = int(config["num_epochs"] )
__lowerCAmelCase : Tuple = int(config["seed"] )
__lowerCAmelCase : Tuple = int(config["batch_size"] )
__lowerCAmelCase : int = config["image_size"]
if not isinstance(__snake_case ,(list, tuple) ):
__lowerCAmelCase : Tuple = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps ,"isdigit" ):
if args.checkpointing_steps == "epoch":
__lowerCAmelCase : Optional[Any] = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
__lowerCAmelCase : Dict = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
__lowerCAmelCase : int = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
__lowerCAmelCase : Dict = os.path.split(__snake_case )[-1].split("." )[0]
accelerator.init_trackers(__snake_case ,__snake_case )
# Grab all the image filenames
__lowerCAmelCase : Union[str, Any] = [os.path.join(args.data_dir ,__snake_case ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
__lowerCAmelCase : Union[str, Any] = [extract_label(__snake_case ) for fname in file_names]
__lowerCAmelCase : Any = list(set(__snake_case ) )
id_to_label.sort()
__lowerCAmelCase : Optional[Any] = {lbl: i for i, lbl in enumerate(__snake_case )}
# Set the seed before splitting the data.
np.random.seed(__snake_case )
torch.manual_seed(__snake_case )
torch.cuda.manual_seed_all(__snake_case )
# Split our filenames between train and validation
__lowerCAmelCase : List[str] = np.random.permutation(len(__snake_case ) )
__lowerCAmelCase : Dict = int(0.8 * len(__snake_case ) )
__lowerCAmelCase : str = random_perm[:cut]
__lowerCAmelCase : Optional[int] = random_perm[cut:]
# For training we use a simple RandomResizedCrop
__lowerCAmelCase : str = Compose([RandomResizedCrop(__snake_case ,scale=(0.5, 1.0) ), ToTensor()] )
__lowerCAmelCase : List[str] = PetsDataset(
[file_names[i] for i in train_split] ,image_transform=__snake_case ,label_to_id=__snake_case )
# For evaluation, we use a deterministic Resize
__lowerCAmelCase : Union[str, Any] = Compose([Resize(__snake_case ), ToTensor()] )
__lowerCAmelCase : List[str] = PetsDataset([file_names[i] for i in eval_split] ,image_transform=__snake_case ,label_to_id=__snake_case )
# Instantiate dataloaders.
__lowerCAmelCase : Union[str, Any] = DataLoader(__snake_case ,shuffle=__snake_case ,batch_size=__snake_case ,num_workers=4 )
__lowerCAmelCase : Any = DataLoader(__snake_case ,shuffle=__snake_case ,batch_size=__snake_case ,num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCAmelCase : int = create_model("resnet50d" ,pretrained=__snake_case ,num_classes=len(__snake_case ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__lowerCAmelCase : List[str] = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
__lowerCAmelCase : Any = False
for param in model.get_classifier().parameters():
__lowerCAmelCase : List[Any] = True
# We normalize the batches of images to be a bit faster.
__lowerCAmelCase : Optional[Any] = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
__lowerCAmelCase : int = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
__lowerCAmelCase : int = torch.optim.Adam(params=model.parameters() ,lr=lr / 25 )
# Instantiate learning rate scheduler
__lowerCAmelCase : List[Any] = OneCycleLR(optimizer=__snake_case ,max_lr=__snake_case ,epochs=__snake_case ,steps_per_epoch=len(__snake_case ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Tuple = accelerator.prepare(
__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
# We need to keep track of how many total steps we have iterated over
__lowerCAmelCase : Dict = 0
# We also need to keep track of the starting epoch so files are named properly
__lowerCAmelCase : List[str] = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
__lowerCAmelCase : Optional[Any] = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
__lowerCAmelCase : Optional[int] = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
__lowerCAmelCase : Optional[Any] = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
__lowerCAmelCase : str = os.path.splitext(__snake_case )[0]
if "epoch" in training_difference:
__lowerCAmelCase : Dict = int(training_difference.replace("epoch_" ,"" ) ) + 1
__lowerCAmelCase : Optional[Any] = None
else:
__lowerCAmelCase : Any = int(training_difference.replace("step_" ,"" ) )
__lowerCAmelCase : Optional[int] = resume_step // len(__snake_case )
resume_step -= starting_epoch * len(__snake_case )
# Now we train the model
for epoch in range(__snake_case ,__snake_case ):
model.train()
if args.with_tracking:
__lowerCAmelCase : Any = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
__lowerCAmelCase : Optional[int] = accelerator.skip_first_batches(__snake_case ,__snake_case )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
__lowerCAmelCase : Optional[int] = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
__lowerCAmelCase : List[Any] = {k: v.to(accelerator.device ) for k, v in batch.items()}
__lowerCAmelCase : Union[str, Any] = (batch["image"] - mean) / std
__lowerCAmelCase : Optional[int] = model(__snake_case )
__lowerCAmelCase : List[str] = torch.nn.functional.cross_entropy(__snake_case ,batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(__snake_case )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(__snake_case ,__snake_case ):
__lowerCAmelCase : List[Any] = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
__lowerCAmelCase : Tuple = os.path.join(args.output_dir ,__snake_case )
accelerator.save_state(__snake_case )
model.eval()
__lowerCAmelCase : int = 0
__lowerCAmelCase : Optional[int] = 0
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
__lowerCAmelCase : Tuple = {k: v.to(accelerator.device ) for k, v in batch.items()}
__lowerCAmelCase : Optional[Any] = (batch["image"] - mean) / std
with torch.no_grad():
__lowerCAmelCase : Optional[Any] = model(__snake_case )
__lowerCAmelCase : List[str] = outputs.argmax(dim=-1 )
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = accelerator.gather_for_metrics((predictions, batch["label"]) )
__lowerCAmelCase : str = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
__lowerCAmelCase : Optional[Any] = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(__snake_case ),
"epoch": epoch,
} ,step=__snake_case ,)
if checkpointing_steps == "epoch":
__lowerCAmelCase : Tuple = F"""epoch_{epoch}"""
if args.output_dir is not None:
__lowerCAmelCase : Optional[Any] = os.path.join(args.output_dir ,__snake_case )
accelerator.save_state(__snake_case )
if args.with_tracking:
accelerator.end_training()
def _lowercase ( ) -> Tuple:
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument("--data_dir" ,required=__snake_case ,help="The data folder on disk." )
parser.add_argument("--fp16" ,action="store_true" ,help="If passed, will use FP16 training." )
parser.add_argument(
"--mixed_precision" ,type=__snake_case ,default=__snake_case ,choices=["no", "fp16", "bf16", "fp8"] ,help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." ,)
parser.add_argument("--cpu" ,action="store_true" ,help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps" ,type=__snake_case ,default=__snake_case ,help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch." ,)
parser.add_argument(
"--output_dir" ,type=__snake_case ,default="." ,help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." ,)
parser.add_argument(
"--resume_from_checkpoint" ,type=__snake_case ,default=__snake_case ,help="If the training should continue from a checkpoint folder." ,)
parser.add_argument(
"--with_tracking" ,action="store_true" ,help="Whether to load in all available experiment trackers from the environment and use them for logging." ,)
parser.add_argument(
"--project_dir" ,type=__snake_case ,default="logs" ,help="Location on where to store experiment tracking logs` and relevent project information" ,)
__lowerCAmelCase : List[Any] = parser.parse_args()
__lowerCAmelCase : List[Any] = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(__snake_case ,__snake_case )
if __name__ == "__main__":
main()
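
# Launch sketch (added): this mirrors accelerate's computer-vision example, so
# a typical invocation (assumed file name and path) is
#
#   accelerate launch cv_example.py --data_dir path/to/images
#
# where the folder holds files named `<label>_<number>.jpg`, matching the
# regex in `extract_label` above.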
# Directed acyclic graph, stored as adjacency lists.
edges: dict[str, list[str]] = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices: list[str] = ["a", "b", "c", "d", "e"]


def topological_sort(start: str, visited: list[str], sort: list[str]) -> list[str]:
    """Depth-first-search based topological sort of the module-level graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
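
# Note (added): vertices are appended only after all their descendants, so the
# returned list is in *reverse* topological order; reversing it gives a valid
# ordering for the graph above.
if __name__ == "__main__":
    print(topological_sort("a", [], [])[::-1])  # -> ['a', 'b', 'e', 'd', 'c']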
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
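
# Behaviour sketch (added): with `_LazyModule` registered in `sys.modules`, a
# user-level import such as
#
#   from transformers.models.jukebox import JukeboxConfig
#
# resolves the submodule on first attribute access, so the heavy torch-backed
# modeling code is only imported when it is actually available and needed.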
from argparse import ArgumentParser

from .env import EnvironmentCommand


def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
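
# Usage sketch (added): the `diffusers-cli` console script points at `main`,
# so `diffusers-cli env` runs the registered `EnvironmentCommand` and prints
# environment/debug information.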
import inspect
import unittest

import torch
import torch.nn as nn

from accelerate.hooks import (
    AlignDevicesHook,
    ModelHook,
    SequentialHook,
    add_hook_to_module,
    attach_align_device_hook,
    remove_hook_from_module,
    remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1


class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)

        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)

    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))

    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
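
# Run sketch (added): most of the suite runs on CPU with plain pytest; only
# the model-parallelism case is gated by `@require_multi_gpu`. Assuming this
# file lives at accelerate's tests/test_hooks.py:
#
#   python -m pytest tests/test_hooks.py -v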
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) with a one-dimensional DP table (Pascal's rule)."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
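
# Sanity checks (added): C(10, 5) = 252, and Pascal's identity
# C(n, r) = C(n-1, r-1) + C(n-1, r) relates neighbouring results.
assert binomial_coefficient(n=10, r=5) == 252
assert binomial_coefficient(n=10, r=5) == binomial_coefficient(n=9, r=4) + binomial_coefficient(n=9, r=5)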
import unittest

import numpy as np
import timeout_decorator  # noqa

from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor


if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp

    from transformers import BlenderbotTokenizer
    from transformers.models.blenderbot.modeling_flax_blenderbot import (
        FlaxBlenderbotForConditionalGeneration,
        FlaxBlenderbotModel,
        shift_tokens_right,
    )


def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }


class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())


@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_ids, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
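
# Run sketch (added): the `@slow` cases above only execute when slow tests are
# enabled, e.g. (assuming the usual transformers checkout layout):
#
#   RUN_SLOW=1 python -m pytest tests/models/blenderbot -k flax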
"""simple docstring"""
def a__ ( __lowercase ) -> int:
_A = [[0 for _ in range(__lowercase )] for _ in range(m + 1 )]
for i in range(m + 1 ):
_A = 1
for n in range(m + 1 ):
for k in range(1 , __lowercase ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
a_ = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print("Please enter a number.")
else:
try:
a_ = int(sys.argv[1])
print(partition(n))
except ValueError:
print("Please pass a number.")
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
"configuration_x_clip": [
"XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XCLIPConfig",
"XCLIPTextConfig",
"XCLIPVisionConfig",
],
"processing_x_clip": ["XCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"XCLIPModel",
"XCLIPPreTrainedModel",
"XCLIPTextModel",
"XCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import random


def rabin_miller(num: int) -> bool:
    """Probabilistic Rabin-Miller primality test with 5 random rounds."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    """Trial-divide by the primes below 1000, then fall back to Rabin-Miller."""
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
        67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,
        139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211,
        223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283,
        293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379,
        383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461,
        463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563,
        569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643,
        647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739,
        743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829,
        839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
        941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    """Generate a random prime with ``keysize`` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
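
# Note (added): for a composite input, each Rabin-Miller round reports "prime"
# with probability at most 1/4, so the 5 rounds above leave a false-positive
# rate of at most (1/4) ** 5, i.e. under 0.1%.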
from numpy import exp, pi, sqrt


def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Evaluate the Gaussian (normal) probability density function at ``x``."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
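
# Sanity check (added): at the mean, with sigma = 1, the density equals
# 1 / sqrt(2 * pi), roughly 0.3989.
assert abs(gaussian(0) - 1 / sqrt(2 * pi)) < 1e-12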
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = 0
__magic_name__ = False
__magic_name__ = 3.0
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
self.assertDictEqual(MockClass(a=2 , b=snake_case__ ).to_kwargs() , {'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )
@require_cuda
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
_lowerCAmelCase : Dict = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
_lowerCAmelCase : str = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , snake_case__ )
@require_multi_gpu
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(snake_case__ , env=os.environ.copy() )
if __name__ == "__main__":
lowerCAmelCase : int = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
lowerCAmelCase : Tuple = Accelerator(kwargs_handlers=[ddp_scaler])
lowerCAmelCase : Optional[Any] = torch.nn.Linear(1_00, 2_00)
lowerCAmelCase : List[str] = accelerator.prepare(model)
# Check the values changed in kwargs
lowerCAmelCase : List[Any] = """"""
lowerCAmelCase : Tuple = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
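# Minimal sketch (assumed helper names, mirroring the dataclass-handler
# behavior the tests above rely on): `to_kwargs` reports only the fields
# whose values differ from their declared defaults.
from dataclasses import fields

@dataclass
class TinyHandler:
    a: int = 0
    b: bool = False

    def to_kwargs_sketch(self):
        return {
            f.name: getattr(self, f.name)
            for f in fields(self)
            if getattr(self, f.name) != f.default
        }

assert TinyHandler().to_kwargs_sketch() == {}
assert TinyHandler(a=2).to_kwargs_sketch() == {"a": 2}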
| 25
|
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
    )
    LlamaTokenizerFast = None
INTERMEDIATE_SIZE_MAP = {
"""7B""": 1_10_08,
"""13B""": 1_38_24,
"""30B""": 1_79_20,
"""65B""": 2_20_16,
"""70B""": 2_86_72,
}
NUM_SHARDS = {
"""7B""": 1,
"""7Bf""": 1,
"""13B""": 2,
"""13Bf""": 2,
"""30B""": 4,
"""65B""": 8,
"""70B""": 8,
"""70Bf""": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
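# Worked example (illustrative): for the 7B model, dim n = 4096 with
# ffn_dim_multiplier = 1 and multiple_of = 256 gives int(8 * 4096 / 3) = 10922,
# which rounds up to the next multiple of 256, i.e. 11008 -- the 7B value in
# the size map above.
assert compute_intermediate_size(4096) == 11008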
def read_json(path):
    with open(path, "r") as f:
        return json.load(f)
def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)
    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10_000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim
    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
f'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wq.weight'] ),
f'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wk.weight'] ),
f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'],
f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'],
f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'],
f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'],
f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'],
f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'],
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
f'layers.{layer_i}.attention_norm.weight'
].clone(),
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
f'layers.{layer_i}.ffn_norm.weight'
].clone(),
}
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)
            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )
        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))
    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
        state_dict = {
'model.embed_tokens.weight': loaded['tok_embeddings.weight'],
'model.norm.weight': loaded['norm.weight'],
'lm_head.weight': loaded['output.weight'],
}
else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }
    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))
    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)
    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()
    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path
    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir", help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size", choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir", help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir, input_base_path=os.path.join(args.input_dir, args.model_size), model_size=args.model_size, safe_serialization=args.safe_serialization, )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
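# Standalone sketch (toy tensors, not the real checkpoints) of the shard
# stitching performed in `write_model` above: column-parallel weights
# (wq/wk/wv, w1, w3) are concatenated along dim 0, row-parallel weights
# (wo, w2) along dim 1.
def _toy_shard_stitching():
    shards = [torch.randn(4, 8) for _ in range(2)]
    col_parallel = torch.cat(shards, dim=0)  # (8, 8): stacks output rows
    row_parallel = torch.cat(shards, dim=1)  # (4, 16): stacks input columns
    return col_parallel.shape, row_parallel.shape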
| 25
| 1
|
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
"""sample_size""": 32,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [32, 64],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
IMAGENET_64_UNET_CONFIG = {
"""sample_size""": 64,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [192, 192 * 2, 192 * 3, 192 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
LSUN_256_UNET_CONFIG = {
"""sample_size""": 256,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
CD_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 40,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 201,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 151,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
def strabool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
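# Quick self-checks (illustrative) of the string-to-bool parsing above.
assert strabool("yes") is True
assert strabool("0") is False
assert strabool(True) is True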
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)
    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]
    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
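# Shape-only sketch (toy channel count, illustrative helper) of the attention
# conversion above: a fused 1x1-conv qkv weight of shape (3*C, C, 1, 1) is
# chunked into q/k/v and squeezed down to the (C, C) linear layout that
# diffusers expects.
def _toy_qkv_split(channels: int = 4):
    qkv = torch.randn(3 * channels, channels, 1, 1)
    q, k, v = qkv.chunk(3, dim=0)
    return q.squeeze(-1).squeeze(-1).shape  # torch.Size([channels, channels])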
def con_pt_to_diffuser(checkpoint_path: str, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
_SCREAMING_SNAKE_CASE = unet_config["""down_block_types"""]
_SCREAMING_SNAKE_CASE = unet_config["""layers_per_block"""]
_SCREAMING_SNAKE_CASE = unet_config["""attention_head_dim"""]
_SCREAMING_SNAKE_CASE = unet_config["""block_out_channels"""]
_SCREAMING_SNAKE_CASE = 1
_SCREAMING_SNAKE_CASE = channels_list[0]
for i, layer_type in enumerate(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE = channels_list[i]
_SCREAMING_SNAKE_CASE = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE = F'down_blocks.{i}.resnets.{j}'
_SCREAMING_SNAKE_CASE = F'input_blocks.{current_layer}.0'
_SCREAMING_SNAKE_CASE = True if j == 0 and downsample_block_has_skip else False
_SCREAMING_SNAKE_CASE = convert_resnet(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , has_skip=__lowerCamelCase )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE = F'down_blocks.{i}.resnets.{j}'
_SCREAMING_SNAKE_CASE = F'input_blocks.{current_layer}.0'
_SCREAMING_SNAKE_CASE = True if j == 0 and downsample_block_has_skip else False
_SCREAMING_SNAKE_CASE = convert_resnet(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , has_skip=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = F'down_blocks.{i}.attentions.{j}'
_SCREAMING_SNAKE_CASE = F'input_blocks.{current_layer}.1'
_SCREAMING_SNAKE_CASE = convert_attention(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
current_layer += 1
if i != len(__lowerCamelCase ) - 1:
_SCREAMING_SNAKE_CASE = F'down_blocks.{i}.downsamplers.0'
_SCREAMING_SNAKE_CASE = F'input_blocks.{current_layer}.0'
_SCREAMING_SNAKE_CASE = convert_resnet(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
current_layer += 1
_SCREAMING_SNAKE_CASE = current_channels
    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    current_layer = 0
_SCREAMING_SNAKE_CASE = unet_config["""up_block_types"""]
for i, layer_type in enumerate(__lowerCamelCase ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
_SCREAMING_SNAKE_CASE = F'up_blocks.{i}.resnets.{j}'
_SCREAMING_SNAKE_CASE = F'output_blocks.{current_layer}.0'
_SCREAMING_SNAKE_CASE = convert_resnet(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , has_skip=__lowerCamelCase )
current_layer += 1
if i != len(__lowerCamelCase ) - 1:
_SCREAMING_SNAKE_CASE = F'up_blocks.{i}.upsamplers.0'
_SCREAMING_SNAKE_CASE = F'output_blocks.{current_layer-1}.1'
_SCREAMING_SNAKE_CASE = convert_resnet(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
_SCREAMING_SNAKE_CASE = F'up_blocks.{i}.resnets.{j}'
_SCREAMING_SNAKE_CASE = F'output_blocks.{current_layer}.0'
_SCREAMING_SNAKE_CASE = convert_resnet(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , has_skip=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = F'up_blocks.{i}.attentions.{j}'
_SCREAMING_SNAKE_CASE = F'output_blocks.{current_layer}.1'
_SCREAMING_SNAKE_CASE = convert_attention(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
current_layer += 1
if i != len(__lowerCamelCase ) - 1:
_SCREAMING_SNAKE_CASE = F'up_blocks.{i}.upsamplers.0'
_SCREAMING_SNAKE_CASE = F'output_blocks.{current_layer-1}.2'
_SCREAMING_SNAKE_CASE = convert_resnet(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE = checkpoint["""out.0.weight"""]
_SCREAMING_SNAKE_CASE = checkpoint["""out.0.bias"""]
_SCREAMING_SNAKE_CASE = checkpoint["""out.2.weight"""]
_SCREAMING_SNAKE_CASE = checkpoint["""out.2.bias"""]
return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
print(f"""Checkpoint: {ckpt_name}""")
# Get U-Net config
if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
else:
raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""")
if not args.class_cond:
        unet_config["num_class_embeds"] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNet2DModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""")
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 58
|
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
)
parser.add_argument(
"""--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
)
parser.add_argument(
"""--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
)
parser.add_argument("""--vocab_size""", default=30_522, type=int)
    args = parser.parse_args()
logger.info(f"""Loading data from {args.data_file}""")
with open(args.data_file, """rb""") as fp:
        data = pickle.load(fp)
logger.info("""Counting occurrences for MLM.""")
    counter = Counter()
for tk_ids in data:
counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
logger.info(f"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, """wb""") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
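# Minimal illustration (toy ids, not part of the original script) of the dense
# count table built above: each token id indexes into a list sized to the vocabulary.
toy_counter = Counter([1, 1, 3, 5, 5, 5])
toy_counts = [0] * 8
for toy_id, toy_freq in toy_counter.items():
    toy_counts[toy_id] = toy_freq
assert toy_counts == [0, 2, 0, 1, 0, 3, 0, 0]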
| 58
| 1
|
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
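def _toy_mask_example():
    # Illustration only (assumed helper, not part of the original test suite):
    # with pad token id 1, `ne(pad_token_id)` masks out the padded positions.
    ids = torch.tensor([[5, 7, 1, 1]])
    return ids.ne(1)  # tensor([[ True,  True, False, False]])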
class MaMaaaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def get_config(self):
return MaMaaaConfig(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,encoder_layerdrop=self.encoder_layerdrop ,decoder_layerdrop=self.decoder_layerdrop ,max_position_embeddings=self.max_position_embeddings ,eos_token_id=self.eos_token_id ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,)
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = MaMaaaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname).to(torch_device)
        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname).to(torch_device)
        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
    def setUp(self):
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)
            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)
            with torch.no_grad():
                model(**inputs)[0]
    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MaMaaaForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
TOLERANCE = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M")

    def test_inference_no_head(self):
        model = MaMaaaModel.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")
        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]
        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")
        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )
        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]
        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
| 59
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
class FakeRegNetVisslWrapper(nn.Module):
    def __init__(self, model: nn.Module):
        super().__init__()
        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))
        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x, out_feat_keys=None, feature_blocks=self._feature_blocks,)
class NameToFromModelFuncMap(dict):
    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val
class NameToOurModelFuncMap(dict):
    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)
    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)
    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )
    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output
    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]
    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message="Add model", use_temp_dir=True, )
        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message="Add image processor", use_temp_dir=True, )
        print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    id2label = id2label
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
"""regnet-x-002""": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="""x""" ),
"""regnet-x-004""": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="""x""" ),
"""regnet-x-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="""x""" ),
"""regnet-x-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="""x""" ),
"""regnet-x-016""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="""x""" ),
"""regnet-x-032""": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="""x""" ),
"""regnet-x-040""": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="""x""" ),
"""regnet-x-064""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="""x""" ),
"""regnet-x-080""": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="""x""" ),
"""regnet-x-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="""x""" ),
"""regnet-x-160""": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="""x""" ),
"""regnet-x-320""": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="""x""" ),
# y variant
"""regnet-y-002""": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"""regnet-y-004""": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"""regnet-y-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"""regnet-y-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"""regnet-y-016""": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"""regnet-y-032""": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
"""regnet-y-040""": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
"""regnet-y-064""": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
"""regnet-y-080""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
"""regnet-y-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
"""regnet-y-160""": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
"""regnet-y-320""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"""regnet-y-320-seer""": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"""regnet-y-640-seer""": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"""regnet-y-1280-seer""": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"""regnet-y-2560-seer""": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"""regnet-y-10b-seer""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
# finetuned on imagenet
"""regnet-y-320-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"""regnet-y-640-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"""regnet-y-1280-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"""regnet-y-2560-seer-in1k""": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"""regnet-y-10b-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
# add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]
# pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch", lambda: FakeRegNetVisslWrapper(RegNetY32gf()), )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch", lambda: FakeRegNetVisslWrapper(RegNetY64gf()), )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch", lambda: FakeRegNetVisslWrapper(RegNetY128gf()), )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch", lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))), )
    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch", lambda: FakeRegNetVisslWrapper(RegNetY32gf()), )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch", lambda: FakeRegNetVisslWrapper(RegNetY64gf()), )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch", lambda: FakeRegNetVisslWrapper(RegNetY128gf()), )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch", lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))), )
    if model_name:
        convert_weight_and_push(
            model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], save_directory, push_to_hub, )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], config, save_directory, push_to_hub, )
return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported regnet* architecture,'''
''' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
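def _toy_hook_trace():
    # Standalone sketch (illustrative, not used by the conversion) of the
    # hook-based tracing `Tracker` performs above: record each leaf module
    # as it fires during a forward pass, yielding modules in execution order.
    traced = []
    net = nn.Sequential(nn.Conv2d(3, 4, 3), nn.BatchNorm2d(4), nn.ReLU())
    handles = [
        m.register_forward_hook(lambda mod, inp, out: traced.append(mod))
        for m in net.modules()
        if len(list(m.children())) == 0
    ]
    net(torch.randn(1, 3, 8, 8))
    for h in handles:
        h.remove()
    return [type(m).__name__ for m in traced]  # ['Conv2d', 'BatchNorm2d', 'ReLU']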
| 59
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
def lowerCamelCase_ ( self: str ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 41
|
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def test_encodings_from_sample_data(self):
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)
    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
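# A minimal usage sketch of the tokenizer under test (illustrative only):
#
#     from transformers import BloomTokenizerFast
#     tok = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
#     ids = tok("The quick brown fox")["input_ids"]
#     tok.decode(ids)  # -> "The quick brown fox"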
| 115
| 0
|
"""simple docstring"""
class Node:
    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous
class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None
    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)
    def get_node(self, item) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None
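# A minimal usage sketch of the list above (illustrative, not part of the
# original module):
def _example_usage() -> None:
    linked_list = LinkedList()
    for value in (1, 2, 3):
        linked_list.insert(value)
    linked_list.insert_at_position(2, 9)  # list is now: 1 9 2 3
    assert 9 in linked_list
    linked_list.delete_value(9)
    assert str(linked_list) == "1 2 3"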
def A_ ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 358
|
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 153
| 0
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        # Computes the expected (height, width) after resizing, given an input image or batch.
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
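# A minimal usage sketch of the image processor (the checkpoint name is just
# an example):
#
#     from transformers import DeformableDetrImageProcessor
#     from PIL import Image
#     processor = DeformableDetrImageProcessor.from_pretrained("SenseTime/deformable-detr")
#     image = Image.open("cats.jpg")
#     inputs = processor(images=image, return_tensors="pt")  # pixel_values (+ pixel_mask when padding)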
| 163
|
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01


@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ) , splits=[
{
"""name""": """train""",
"""num_bytes""": 2_3_5_1_5_6_3,
"""num_examples""": 1_0_0_0_0,
},
{
"""name""": """validation""",
"""num_bytes""": 2_3_8_4_1_8,
"""num_examples""": 1_0_0_0,
},
] , download_size=3_9_4_0_6_8_0 , dataset_size=2_5_8_9_9_8_1 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
| 163
| 1
|
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    # Counts the unique paths from the top-left to the bottom-right corner of
    # `grid`, moving in the four cardinal directions and avoiding cells marked 1.
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
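# A small illustrative check (not part of the original module): on a 2x2 grid
# with no blocked cells there are exactly two paths from (0, 0) to (1, 1).
def _example_unique_paths() -> None:
    grid = [[0, 0], [0, 0]]
    assert depth_first_search(grid, 0, 0, set()) == 2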
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363
|
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 18
| 0
|
"""simple docstring"""
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 25
|
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """
    Depth estimation pipeline using any `AutoModelForDepthEstimation`. This pipeline predicts the depth of an image.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
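# A minimal usage sketch (the model name is just an example checkpoint):
#
#     from transformers import pipeline
#     depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#     result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#     result["depth"].save("depth.png")  # PIL image; result["predicted_depth"] is a tensor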
| 25
| 1
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LEDTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))
    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embedded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 130
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
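# A minimal usage sketch (illustrative only):
#
#     from transformers import CanineConfig, CanineModel
#     config = CanineConfig()        # canine-s style defaults
#     model = CanineModel(config)    # randomly initialised weights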
| 130
| 1
|
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"\n        return_all_scores (`bool`, *optional*, defaults to `False`):\n            Whether to return all prediction scores or just the one of the predicted class.\n        function_to_apply (`str`, *optional*, defaults to `\"default\"`):\n            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n            - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n              has several labels, will apply the softmax function on the output.\n            - `\"sigmoid\"`: Applies the sigmoid function on the output.\n            - `\"softmax\"`: Applies the softmax function on the output.\n            - `\"none\"`: Does not apply any function on the output.\n        ",
)
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if you want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
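# A minimal usage sketch (the checkpoint name is just an example):
#
#     from transformers import pipeline
#     classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
#     classifier("This movie was great!")               # [{"label": "POSITIVE", "score": ...}]
#     classifier("This movie was great!", top_k=None)   # scores for every label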
| 59
|
ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 10_00,
"megajoule": 1_00_00_00,
"gigajoule": 10_00_00_00_00,
"wattsecond": 1.0,
"watthour": 36_00,
"kilowatthour": 3_60_00_00,
"newtonmeter": 1.0,
"calorie_nutr": 41_86.8,
"kilocalorie_nutr": 4_18_68_00.00,
"electronvolt": 1.602_176_634e-19,
"britishthermalunit_it": 10_55.0_55_85,
"footpound": 1.35_5818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
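    # Quick sketch (a kilowatt-hour is 3.6 MJ; keys are from the table above):
    print(energy_conversion("kilowatthour", "joule", 1.0))  # -> 3600000.0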
| 59
| 1
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    '''simple docstring'''
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"})
@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("""Training/evaluation parameters %s""" , training_args )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("""Task not found: %s""" % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(""".ckpt""" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, """eval_results.txt""")
        if trainer.is_world_master():
            with open(output_eval_file, """w""") as writer:
                logger.info("""***** Eval results *****""")
                for key, value in result.items():
                    logger.info("""  %s = %s""", key, value)
                    writer.write("""%s = %s\n""" % (key, value))
            results.update(result)
    return results
def _mp_fn(index):
    '''simple docstring'''
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
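    # Example invocation (hypothetical paths; flags mirror the argument dataclasses above):
    #   python run_multiple_choice.py --task_name swag --model_name_or_path bert-base-uncased \
    #       --data_dir ./data/swag --output_dir ./out --do_train --do_eval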
| 367
|
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"
    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''')
        super().__init__(**kwargs)
    @property
    def chunk_length(self):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)
    @property
    def chunk_stride(self):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))
    @property
    def frame_rate(self):
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)
    @property
    def num_quantizers(self):
        return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
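# A minimal sketch of the derived properties (the defaults give a 24 kHz model):
#   config = EncodecConfig()
#   config.frame_rate      # 75  (24_000 / prod([8, 5, 4, 2]))
#   config.num_quantizers  # 32  (1000 * 24.0 // (75 * 10))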
| 20
| 0
|
"""simple docstring"""
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
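    # Quick sketch: prefix sums of [1, 2, 3] are [1, 3, 6]
    ps = PrefixSum([1, 2, 3])
    print(ps.get_sum(1, 2))    # 5 (sum of elements at indices 1..2)
    print(ps.contains_sum(3))  # True (subarray [1, 2] or [3])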
| 84
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 153
| 0
|
Point3d = tuple[float, float, float]
Vector3d = tuple[float, float, float]
def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    '''simple docstring'''
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)
def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    '''simple docstring'''
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    '''simple docstring'''
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)
def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    '''simple docstring'''
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
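# Quick sketch of the collinearity check:
#   are_collinear((0, 0, 0), (1, 0, 0), (2, 0, 0))  # True  (cross product is the zero vector)
#   are_collinear((0, 0, 0), (1, 0, 0), (0, 1, 0))  # False (cross product is (0, 0, 1))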
| 250
|
import contextlib
import importlib
import io
import unittest
import unittest.mock
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
REVISION_ID_DEFAULT = 'main'
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = 'aaaaaaa'
# This commit does not exist, so we should 404.
PINNED_SHA1 = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def context_en():
    '''simple docstring'''
    print('''Welcome!''')
    yield
    print('''Bye!''')
@contextlib.contextmanager
def context_fr():
    '''simple docstring'''
    print('''Bonjour!''')
    yield
    print('''Au revoir!''')
class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec('''transformers''') is not None
class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch('''sys.stdout''', new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print('''Transformers are awesome!''')
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), '''Transformers are awesome!\n''')
    @unittest.mock.patch('''sys.stdout''', new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print('''Transformers are awesome!''')
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), '''Welcome!\nTransformers are awesome!\nBye!\n''')
    @unittest.mock.patch('''sys.stdout''', new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print('''Transformers are awesome!''')
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), '''Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n''')
    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ['''labels'''])
        self.assertEqual(find_labels(BertForPreTraining), ['''labels''', '''next_sentence_label'''])
        self.assertEqual(find_labels(BertForQuestionAnswering), ['''start_positions''', '''end_positions'''])
        class DummyModel(BertForSequenceClassification):
            pass
        self.assertEqual(find_labels(DummyModel), ['''labels'''])
    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ['''labels'''])
        self.assertEqual(find_labels(TFBertForPreTraining), ['''labels''', '''next_sentence_label'''])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ['''start_positions''', '''end_positions'''])
        class DummyModel(TFBertForSequenceClassification):
            pass
        self.assertEqual(find_labels(DummyModel), ['''labels'''])
    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])
        class DummyModel(FlaxBertForSequenceClassification):
            pass
        self.assertEqual(find_labels(DummyModel), [])
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def time_input_dim(self):
        return 32
    @property
    def block_out_channels_0(self):
        return self.time_input_dim
    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f'image.shape {image.shape}')
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy")
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="").to_tuple()
        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 101
|
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
    """simple docstring"""
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
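    # Sketch: a 10 mH / 100 nF LC tank resonates near 5.03 kHz
    print(resonant_frequency(inductance=10e-3, capacitance=100e-9))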
| 18
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    if not isinstance(n, int):
        raise ValueError("""n must be an integer""")
    if n <= 0:
        raise ValueError("""n must be >= 0""")
    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []
def solution() -> int:
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F'{solution() = }')
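    # Project Euler #46 ("Goldbach's other conjecture"): the smallest odd composite
    # that cannot be written as a prime plus twice a square is 5777.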
| 369
|
"""simple docstring"""
def reverse_words(input_str: str) -> str:
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
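    # Quick sketch:
    print(reverse_words("Hello World"))  # -> "World Hello"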
| 336
| 0
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether cp is the codepoint of a CJK character."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False
def is_chinese(word: str):
    """simple docstring"""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    """simple docstring"""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    """simple docstring"""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    """simple docstring"""
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for input_id in input_ids:
            token = bert_tokenizer._convert_id_to_token(input_id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    """simple docstring"""
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
    args = parser.parse_args()
main(args)
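    # Example invocation (the default paths above are hypothetical local resources):
    #   python prepare_chinese_ref.py --file_name data.txt --ltp ./ltp \
    #       --bert bert-base-chinese --save_path ref.txt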
| 130
|
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    """simple docstring"""
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))
    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))
    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            })
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            })
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))
        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer())
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_legacy_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)
        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        self.assertEqual(
            len(out), 6)  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True)  # check for doc token related keys in dictionary.
| 130
| 1
|
import os
def solution(filename: str = "input.txt") -> int:
    """simple docstring"""
    with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j])
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j])
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 148
|
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """simple docstring"""
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
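    # Sketch: solve for the Casimir force between two 4 cm^2 plates 60 nm apart
    print(casimir_force(force=0, area=4e-4, distance=60e-9))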
| 148
| 1
|
"""simple docstring"""
def binary_and(a: int, b: int) -> str:
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
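    # Quick sketch: 25 = 0b11001 and 32 = 0b100000 share no set bits
    print(binary_and(25, 32))  # -> "0b000000"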
| 241
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""uclanlp/visualbert-vqa""": """https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-pre""": """https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-vcr""": """https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-pre""": """https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-nlvr2""": """https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-pre""": """https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"""
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
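# A minimal sketch of instantiating the config with its defaults:
#   config = VisualBertConfig()
#   print(config.visual_embedding_dim)  # 512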
| 20
| 0
|
'''simple docstring'''
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    '''simple docstring'''
    img_url = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"""
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("""RGB""")
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ])
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    '''simple docstring'''
    if "visual_encoder" in key:
        key = re.sub("""visual_encoder*""", """vision_model.encoder""", key)
    if "blocks" in key:
        key = re.sub(r"""blocks""", """layers""", key)
    if "attn" in key:
        key = re.sub(r"""attn""", """self_attn""", key)
    if "norm1" in key:
        key = re.sub(r"""norm1""", """layer_norm1""", key)
    if "norm2" in key:
        key = re.sub(r"""norm2""", """layer_norm2""", key)
    if "encoder.norm" in key:
        key = re.sub(r"""encoder.norm""", """post_layernorm""", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"""encoder.patch_embed.proj""", """embeddings.patch_embedding""", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"""encoder.pos_embed""", """embeddings.position_embedding""", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"""encoder.cls_token""", """embeddings.class_embedding""", key)
    if "self_attn" in key:
        key = re.sub(r"""self_attn.proj""", """self_attn.projection""", key)
    return key
@torch.no_grad()
def UpperCamelCase_ ( A__ : int , A__ : Tuple=None ):
'''simple docstring'''
if config_path is not None:
lowerCAmelCase_ : List[Any] = BlipConfig.from_pretrained(A__ )
else:
lowerCAmelCase_ : Tuple = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} )
lowerCAmelCase_ : Optional[Any] = BlipForConditionalGeneration(A__ ).eval()
lowerCAmelCase_ : Any = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"""
lowerCAmelCase_ : str = blip_decoder(pretrained=A__ , image_size=3_84 , vit="""base""" )
lowerCAmelCase_ : Optional[Any] = pt_model.eval()
lowerCAmelCase_ : int = pt_model.state_dict()
for key in modified_state_dict.copy():
lowerCAmelCase_ : Dict = modified_state_dict.pop(A__ )
lowerCAmelCase_ : Optional[Any] = rename_key(A__ )
lowerCAmelCase_ : Dict = value
hf_model.load_state_dict(A__ )
lowerCAmelCase_ : List[Any] = 3_84
lowerCAmelCase_ : Optional[Any] = load_demo_image(image_size=A__ , device="""cpu""" )
lowerCAmelCase_ : List[Any] = BertTokenizer.from_pretrained("""bert-base-uncased""" )
lowerCAmelCase_ : Optional[int] = tokenizer(["""a picture of"""] ).input_ids
lowerCAmelCase_ : Any = hf_model.generate(A__ , A__ )
assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
lowerCAmelCase_ : Tuple = hf_model.generate(A__ )
assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(A__ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
lowerCAmelCase_ : List[str] = (
"""https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"""
)
lowerCAmelCase_ : str = blip_vqa(pretrained=A__ , image_size=A__ , vit="""base""" )
vqa_model.eval()
lowerCAmelCase_ : Optional[Any] = vqa_model.state_dict()
for key in modified_state_dict.copy():
lowerCAmelCase_ : Optional[int] = modified_state_dict.pop(A__ )
lowerCAmelCase_ : List[Any] = rename_key(A__ )
lowerCAmelCase_ : Dict = value
lowerCAmelCase_ : Any = BlipForQuestionAnswering(A__ )
hf_vqa_model.load_state_dict(A__ )
lowerCAmelCase_ : List[str] = ["""How many dogs are in this image?"""]
lowerCAmelCase_ : int = tokenizer(A__ , return_tensors="""pt""" ).input_ids
lowerCAmelCase_ : Optional[int] = hf_vqa_model.generate(A__ , A__ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + """_vqa""" )
lowerCAmelCase_ : List[str] = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"""
lowerCAmelCase_ : Any = blip_itm(pretrained=A__ , image_size=A__ , vit="""base""" )
itm_model.eval()
lowerCAmelCase_ : Tuple = itm_model.state_dict()
for key in modified_state_dict.copy():
lowerCAmelCase_ : int = modified_state_dict.pop(A__ )
lowerCAmelCase_ : Optional[int] = rename_key(A__ )
lowerCAmelCase_ : Tuple = value
lowerCAmelCase_ : Any = BlipForImageTextRetrieval(A__ )
lowerCAmelCase_ : Union[str, Any] = ["""A picture of a woman with a dog sitting in a beach"""]
lowerCAmelCase_ : int = tokenizer(
A__ , return_tensors="""pt""" , padding="""max_length""" , truncation=A__ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(A__ )
hf_itm_model.eval()
lowerCAmelCase_ : Optional[int] = hf_itm_model(A__ , A__ , use_itm_head=A__ )
lowerCAmelCase_ : Optional[Any] = hf_itm_model(A__ , A__ , use_itm_head=A__ )
assert out[0].item() == 0.2110687494277954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45698845386505127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + """_itm""" )
if __name__ == "__main__":
__A : List[str] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
__A : Union[str, Any] = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
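# Example invocation of this conversion script (a sketch: the script filename
# and output folder are illustrative, and --checkpoint_path may stay unset since
# the weights are downloaded from the URLs above):
#
#   python convert_blip_original_pytorch_to_hf.py \
#       --pytorch_dump_folder_path ./blip-base-converted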
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A : Optional[Any] = logging.get_logger(__name__)
def UpperCamelCase_ ( A__ : List[Any] , A__ : str=False ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith("""head""" ):
lowerCAmelCase_ : str = """segformer.encoder.""" + key
if key.startswith("""backbone""" ):
lowerCAmelCase_ : str = key.replace("""backbone""" , """segformer.encoder""" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCAmelCase_ : List[str] = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
lowerCAmelCase_ : List[Any] = key.replace(f'patch_embed{idx}' , f'patch_embeddings.{int(A__ )-1}' )
if "norm" in key:
lowerCAmelCase_ : Any = key.replace("""norm""" , """layer_norm""" )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCAmelCase_ : Tuple = key[key.find("""segformer.encoder.layer_norm""" ) + len("""segformer.encoder.layer_norm""" )]
lowerCAmelCase_ : int = key.replace(f'layer_norm{idx}' , f'layer_norm.{int(A__ )-1}' )
if "layer_norm1" in key:
lowerCAmelCase_ : str = key.replace("""layer_norm1""" , """layer_norm_1""" )
if "layer_norm2" in key:
lowerCAmelCase_ : Union[str, Any] = key.replace("""layer_norm2""" , """layer_norm_2""" )
if "block" in key:
# replace for example block1 by block.0
lowerCAmelCase_ : Any = key[key.find("""block""" ) + len("""block""" )]
lowerCAmelCase_ : str = key.replace(f'block{idx}' , f'block.{int(A__ )-1}' )
if "attn.q" in key:
lowerCAmelCase_ : List[Any] = key.replace("""attn.q""" , """attention.self.query""" )
if "attn.proj" in key:
lowerCAmelCase_ : Optional[int] = key.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in key:
lowerCAmelCase_ : str = key.replace("""attn""" , """attention.self""" )
if "fc1" in key:
lowerCAmelCase_ : Optional[Any] = key.replace("""fc1""" , """dense1""" )
if "fc2" in key:
lowerCAmelCase_ : Optional[Any] = key.replace("""fc2""" , """dense2""" )
if "linear_pred" in key:
lowerCAmelCase_ : List[Any] = key.replace("""linear_pred""" , """classifier""" )
if "linear_fuse" in key:
lowerCAmelCase_ : Optional[Any] = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
lowerCAmelCase_ : Any = key.replace("""linear_fuse.bn""" , """batch_norm""" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCAmelCase_ : str = key[key.find("""linear_c""" ) + len("""linear_c""" )]
lowerCAmelCase_ : Dict = key.replace(f'linear_c{idx}' , f'linear_c.{int(A__ )-1}' )
if key.startswith("""head""" ):
lowerCAmelCase_ : int = key.replace("""head""" , """classifier""" )
lowerCAmelCase_ : int = value
return new_state_dict
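# For concreteness, the renaming above maps typical checkpoint keys like these
# (illustrative examples, not an exhaustive list):
#
#   "backbone.patch_embed1.proj.weight" -> "segformer.encoder.patch_embeddings.0.proj.weight"
#   "backbone.block1.0.attn.q.weight"   -> "segformer.encoder.block.0.0.attention.self.query.weight"
#   "decode_head.linear_c4.proj.weight" -> "decode_head.linear_c.3.proj.weight"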
def UpperCamelCase_ ( A__ : int , A__ : Union[str, Any] ):
'''simple docstring'''
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCAmelCase_ : int = state_dict.pop(f'segformer.encoder.block.{i}.{j}.attention.self.kv.weight' )
lowerCAmelCase_ : Optional[int] = state_dict.pop(f'segformer.encoder.block.{i}.{j}.attention.self.kv.bias' )
# next, add keys and values (in that order) to the state dict
lowerCAmelCase_ : List[str] = kv_weight[
: config.hidden_sizes[i], :
]
lowerCAmelCase_ : Optional[Any] = kv_bias[: config.hidden_sizes[i]]
lowerCAmelCase_ : Union[str, Any] = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCAmelCase_ : str = kv_bias[
config.hidden_sizes[i] :
]
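# The slicing above in miniature: the original checkpoint fuses the key and
# value projections into one (2 * hidden, hidden) "kv" matrix, which is split
# into halves. A self-contained sketch with a toy hidden size:
#
#   hidden = 4
#   kv_weight = torch.randn(2 * hidden, hidden)
#   k_weight = kv_weight[:hidden, :]   # first half  -> key projection
#   v_weight = kv_weight[hidden:, :]   # second half -> value projection
#   assert torch.equal(torch.cat([k_weight, v_weight], dim=0), kv_weight)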
def UpperCamelCase_ ( ):
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCAmelCase_ : Optional[Any] = Image.open(requests.get(A__ , stream=A__ ).raw )
return image
@torch.no_grad()
def UpperCamelCase_ ( A__ : Optional[Any] , A__ : List[Any] , A__ : Tuple ):
'''simple docstring'''
lowerCAmelCase_ : str = SegformerConfig()
lowerCAmelCase_ : Optional[Any] = False
# set attributes based on model_name
lowerCAmelCase_ : int = """huggingface/label-files"""
if "segformer" in model_name:
lowerCAmelCase_ : Optional[int] = model_name[len("""segformer.""" ) : len("""segformer.""" ) + 2]
if "ade" in model_name:
lowerCAmelCase_ : List[Any] = 1_50
lowerCAmelCase_ : int = """ade20k-id2label.json"""
lowerCAmelCase_ : Tuple = (1, 1_50, 1_28, 1_28)
elif "city" in model_name:
lowerCAmelCase_ : List[str] = 19
lowerCAmelCase_ : Dict = """cityscapes-id2label.json"""
lowerCAmelCase_ : List[str] = (1, 19, 1_28, 1_28)
else:
raise ValueError(f'Model {model_name} not supported' )
elif "mit" in model_name:
lowerCAmelCase_ : Dict = True
lowerCAmelCase_ : Optional[int] = model_name[4:6]
lowerCAmelCase_ : Union[str, Any] = 10_00
lowerCAmelCase_ : int = """imagenet-1k-id2label.json"""
lowerCAmelCase_ : Optional[Any] = (1, 10_00)
else:
raise ValueError(f'Model {model_name} not supported' )
# set config attributes
lowerCAmelCase_ : Optional[Any] = json.load(open(hf_hub_download(A__ , A__ , repo_type="""dataset""" ) , """r""" ) )
lowerCAmelCase_ : List[Any] = {int(k): v for k, v in idalabel.items()}
lowerCAmelCase_ : List[str] = idalabel
lowerCAmelCase_ : List[Any] = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
lowerCAmelCase_ : Any = [64, 1_28, 3_20, 5_12]
lowerCAmelCase_ : int = 2_56
elif size == "b2":
lowerCAmelCase_ : Any = [64, 1_28, 3_20, 5_12]
lowerCAmelCase_ : List[str] = 7_68
lowerCAmelCase_ : Any = [3, 4, 6, 3]
elif size == "b3":
lowerCAmelCase_ : List[str] = [64, 1_28, 3_20, 5_12]
lowerCAmelCase_ : Union[str, Any] = 7_68
lowerCAmelCase_ : Union[str, Any] = [3, 4, 18, 3]
elif size == "b4":
lowerCAmelCase_ : Tuple = [64, 1_28, 3_20, 5_12]
lowerCAmelCase_ : Tuple = 7_68
lowerCAmelCase_ : Tuple = [3, 8, 27, 3]
elif size == "b5":
lowerCAmelCase_ : Union[str, Any] = [64, 1_28, 3_20, 5_12]
lowerCAmelCase_ : str = 7_68
lowerCAmelCase_ : Any = [3, 6, 40, 3]
else:
raise ValueError(f'Size {size} not supported' )
# load image processor (only resize + normalize)
lowerCAmelCase_ : List[Any] = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=A__ , align=A__ , do_random_crop=A__ )
# prepare image
lowerCAmelCase_ : Optional[Any] = prepare_img()
lowerCAmelCase_ : Union[str, Any] = image_processor(images=A__ , return_tensors="""pt""" ).pixel_values
logger.info(f'Converting model {model_name}...' )
# load original state dict
if encoder_only:
lowerCAmelCase_ : str = torch.load(A__ , map_location=torch.device("""cpu""" ) )
else:
lowerCAmelCase_ : List[str] = torch.load(A__ , map_location=torch.device("""cpu""" ) )["""state_dict"""]
# rename keys
lowerCAmelCase_ : Dict = rename_keys(A__ , encoder_only=A__ )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(A__ , A__ )
# create HuggingFace model and load state dict
if encoder_only:
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : List[Any] = SegformerForImageClassification(A__ )
else:
lowerCAmelCase_ : str = SegformerForSemanticSegmentation(A__ )
model.load_state_dict(A__ )
model.eval()
# forward pass
lowerCAmelCase_ : Tuple = model(A__ )
lowerCAmelCase_ : Union[str, Any] = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
lowerCAmelCase_ : Tuple = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
lowerCAmelCase_ : List[Any] = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
lowerCAmelCase_ : List[Any] = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
lowerCAmelCase_ : List[Any] = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
lowerCAmelCase_ : List[str] = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
lowerCAmelCase_ : List[str] = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
lowerCAmelCase_ : Dict = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
lowerCAmelCase_ : List[Any] = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
lowerCAmelCase_ : Dict = torch.tensor(
[
[[-11.372, -12.787, -13.477], [-12.536, -14.194, -14.409], [-13.217, -14.888, -15.327]],
[[-14.791, -17.122, -18.277], [-17.163, -19.192, -19.533], [-17.897, -19.991, -20.315]],
[[0.76723, 0.41921, -0.077878], [0.47772, 0.0095557, -0.28082], [0.36032, -0.24826, -0.51168]],
] )
elif model_name == "segformer.b0.768x768.city.160k":
lowerCAmelCase_ : str = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
lowerCAmelCase_ : Optional[int] = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
lowerCAmelCase_ : List[Any] = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
lowerCAmelCase_ : int = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
lowerCAmelCase_ : Union[str, Any] = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
lowerCAmelCase_ : List[Any] = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
else:
lowerCAmelCase_ : Optional[Any] = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , A__ , atol=1E-2 )
# finally, save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
image_processor.save_pretrained(A__ )
if __name__ == "__main__":
__A : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="segformer.b0.512x512.ade.160k",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
__A : Tuple = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
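# Example invocation (a sketch; the checkpoint filename is hypothetical -- use
# whatever .pth file was downloaded from the original SegFormer release):
#
#   python convert_segformer_original_to_pytorch.py \
#       --model_name segformer.b0.512x512.ade.160k \
#       --checkpoint_path ./segformer.b0.512x512.ade.160k.pth \
#       --pytorch_dump_folder_path ./segformer-b0-converted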
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    # Project Euler 94: sum the perimeters of all almost-equilateral triangles
    # (sides a, a, a +/- 1) with integral area and perimeter <= max_perimeter.
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(F'''{solution() = }''')
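# Quick sanity check of the recurrence: the first two almost-equilateral
# triangles with integral area are (5, 5, 6) and (17, 17, 16), with perimeters
# 16 and 50.
assert solution(100) == 16 + 50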
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
_snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
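# Behavioural note on the lazy structure above (a sketch): submodule imports
# are deferred until first attribute access, so e.g.
#
#   from transformers.models.opt import OPTConfig  # cheap, config module only
#   from transformers.models.opt import OPTModel   # now triggers the torch-backed modeling_opt import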
"""simple docstring"""
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Find bounds and sum of the maximum-sum contiguous subarray of arr[low..high]."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    """Best subarray crossing the midpoint: scan left from mid, right from mid + 1."""
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1
    summ = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
return max_left, max_right, (left_sum + right_sum)
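# Worked example for the two routines above, on a classic test array
# (illustrative values): the maximum subarray of
# [-2, 1, -3, 4, -1, 2, 1, -5, 4] is arr[3..6] = [4, -1, 2, 1] with sum 6.
#
#   low, high, best = max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4], 0, 8)
#   assert (low, high, best) == (3, 6, 6)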
def time_max_subarray(input_size: int) -> float:
    """Time one max_subarray call on a random array of the given size."""
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start
def plot_runtimes() -> None:
    """Benchmark several input sizes and plot runtime against size."""
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
plt.xlabel("""Number of Inputs""" )
plt.ylabel("""Time taken in seconds""" )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 'facebook/nllb-200-distilled-600M'
__UpperCamelCase = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
'which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
__UpperCamelCase = 'translator'
__UpperCamelCase = AutoTokenizer
__UpperCamelCase = AutoModelForSeqaSeqLM
__UpperCamelCase = LANGUAGE_CODES
__UpperCamelCase = ['text', 'text', 'text']
__UpperCamelCase = ['text']
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if src_lang not in self.lang_to_code:
raise ValueError(f"""{src_lang} is not a supported language.""" )
if tgt_lang not in self.lang_to_code:
raise ValueError(f"""{tgt_lang} is not a supported language.""" )
_lowerCAmelCase = self.lang_to_code[src_lang]
_lowerCAmelCase = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
lowerCamelCase , return_tensors="""pt""" , src_lang=lowerCamelCase , tgt_lang=lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return self.model.generate(**lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowerCamelCase )
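# Minimal usage sketch (assuming the upstream name TranslationTool for the
# class above; the sentence is arbitrary and the NLLB checkpoint downloads on
# first use):
#
#   tool = TranslationTool()
#   print(tool("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English"))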
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ['image_processor', 'tokenizer']
_SCREAMING_SNAKE_CASE = 'LayoutLMv3ImageProcessor'
_SCREAMING_SNAKE_CASE = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self : Optional[Any] , _lowerCAmelCase : Any=None , _lowerCAmelCase : Dict=None , **_lowerCAmelCase : Dict ) -> str:
"""simple docstring"""
snake_case_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __A , )
snake_case_ = kwargs.pop("feature_extractor" )
snake_case_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__A , __A )
def __call__( self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _lowerCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , _lowerCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , _lowerCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , _lowerCAmelCase : bool = True , _lowerCAmelCase : Union[bool, str, PaddingStrategy] = False , _lowerCAmelCase : Union[bool, str, TruncationStrategy] = None , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : int = 0 , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = True , _lowerCAmelCase : Optional[Union[str, TensorType]] = None , **_lowerCAmelCase : Tuple , ) -> Dict:
"""simple docstring"""
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
# first, apply the image processor
snake_case_ = self.image_processor(images=__A , return_tensors=__A )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__A , __A ):
snake_case_ = [text] # add batch dimension (as the image processor always adds a batch dimension)
snake_case_ = features['''words''']
snake_case_ = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_token_type_ids=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , )
# add pixel values
snake_case_ = features.pop("pixel_values" )
if return_overflowing_tokens is True:
snake_case_ = self.get_overflowing_images(__A , encoded_inputs["overflow_to_sample_mapping"] )
snake_case_ = images
return encoded_inputs
def lowerCAmelCase__ ( self : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any ) -> List[Any]:
"""simple docstring"""
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
snake_case_ = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__A ) != len(__A ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
F''' {len(__A )} and {len(__A )}''' )
return images_with_overflow
def lowerCAmelCase__ ( self : Any , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*__A , **__A )
def lowerCAmelCase__ ( self : Any , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : int ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.decode(*__A , **__A )
@property
def lowerCAmelCase__ ( self : List[Any] ) -> str:
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def lowerCAmelCase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __A , )
return self.image_processor_class
@property
def lowerCAmelCase__ ( self : str ) -> List[str]:
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __A , )
return self.image_processor
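# Minimal usage sketch for the processor above (the checkpoint id is the public
# LayoutLMv3 base model; `image` stands for any PIL image):
#
#   from transformers import LayoutLMv3Processor
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#   encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor
#   # encoding holds input_ids, bbox, attention_mask and pixel_values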
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """codegen"""
UpperCamelCase = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Any, __A : Optional[int]=5_0_4_0_0, __A : Tuple=2_0_4_8, __A : Optional[int]=2_0_4_8, __A : List[str]=4_0_9_6, __A : List[str]=2_8, __A : Union[str, Any]=1_6, __A : Tuple=6_4, __A : Union[str, Any]=None, __A : Union[str, Any]="gelu_new", __A : Any=0.0, __A : Dict=0.0, __A : str=0.0, __A : Optional[int]=1E-5, __A : Any=0.0_2, __A : Any=True, __A : Union[str, Any]=5_0_2_5_6, __A : List[str]=5_0_2_5_6, __A : int=False, **__A : List[Any], ):
UpperCAmelCase : int = vocab_size
UpperCAmelCase : Tuple = n_ctx
UpperCAmelCase : Tuple = n_positions
UpperCAmelCase : Optional[int] = n_embd
UpperCAmelCase : Union[str, Any] = n_layer
UpperCAmelCase : List[str] = n_head
UpperCAmelCase : Tuple = n_inner
UpperCAmelCase : int = rotary_dim
UpperCAmelCase : List[Any] = activation_function
UpperCAmelCase : List[str] = resid_pdrop
UpperCAmelCase : Optional[Any] = embd_pdrop
UpperCAmelCase : str = attn_pdrop
UpperCAmelCase : Tuple = layer_norm_epsilon
UpperCAmelCase : Dict = initializer_range
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : Any = bos_token_id
UpperCAmelCase : List[str] = eos_token_id
super().__init__(
bos_token_id=__A, eos_token_id=__A, tie_word_embeddings=__A, **__A )
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : Any, __A : PretrainedConfig, __A : str = "default", __A : List[PatchingSpec] = None, __A : bool = False, ):
super().__init__(__A, task=__A, patching_specs=__A, use_past=__A )
if not getattr(self._config, '''pad_token_id''', __A ):
# TODO: how to do that better?
UpperCAmelCase : Union[str, Any] = 0
@property
def __magic_name__ ( self : str ):
UpperCAmelCase : Union[str, Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(__A, direction='''inputs''' )
UpperCAmelCase : int = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
UpperCAmelCase : List[Any] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def __magic_name__ ( self : Dict ):
return self._config.n_layer
@property
def __magic_name__ ( self : List[str] ):
return self._config.n_head
def __magic_name__ ( self : str, __A : PreTrainedTokenizer, __A : int = -1, __A : int = -1, __A : bool = False, __A : Optional[TensorType] = None, ):
UpperCAmelCase : Union[str, Any] = super(__A, self ).generate_dummy_inputs(
__A, batch_size=__A, seq_length=__A, is_pair=__A, framework=__A )
# We need to order the input in the way they appears in the forward()
UpperCAmelCase : Union[str, Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
UpperCAmelCase , UpperCAmelCase : str = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
UpperCAmelCase : str = seqlen + 2
UpperCAmelCase : Optional[int] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
UpperCAmelCase : Optional[int] = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers )
]
UpperCAmelCase : Union[str, Any] = common_inputs['''attention_mask''']
if self.use_past:
UpperCAmelCase : Optional[Any] = ordered_inputs['''attention_mask'''].dtype
UpperCAmelCase : Dict = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__A, __A, dtype=__A )], dim=1 )
return ordered_inputs
@property
def __magic_name__ ( self : Tuple ):
return 1_3
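# Sketch of exercising the ONNX config above (checkpoint ids are real CodeGen
# models; CodeGenOnnxConfig is assumed to be the upstream name of the
# OnnxConfigWithPast subclass defined above):
#
#   from transformers import AutoConfig, AutoTokenizer
#   config = AutoConfig.from_pretrained("Salesforce/codegen-350M-mono")
#   tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#   onnx_config = CodeGenOnnxConfig(config)
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)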
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase : Dict = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class A__ ( __snake_case , unittest.TestCase ):
_UpperCAmelCase :int = AlbertTokenizer
_UpperCAmelCase :int = AlbertTokenizerFast
_UpperCAmelCase :int = True
_UpperCAmelCase :List[str] = True
_UpperCAmelCase :Optional[Any] = True
def __UpperCamelCase( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCamelCase : List[str] = AlbertTokenizer(A_ )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : List[Any] = "this is a test"
UpperCamelCase : List[Any] = "this is a test"
return input_text, output_text
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = "<pad>"
UpperCamelCase : Dict = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "▁eloquent" )
self.assertEqual(len(A_ ) , 3_0000 )
def __UpperCamelCase( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def __UpperCamelCase( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
UpperCamelCase : Any = self.get_tokenizer()
UpperCamelCase : Optional[int] = self.get_rust_tokenizer()
UpperCamelCase : List[Any] = "I was born in 92000, and this is falsé."
UpperCamelCase : Optional[Any] = tokenizer.tokenize(A_ )
UpperCamelCase : Optional[int] = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
UpperCamelCase : Optional[Any] = tokenizer.encode(A_ , add_special_tokens=A_ )
UpperCamelCase : Optional[int] = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
UpperCamelCase : List[Any] = self.get_rust_tokenizer()
UpperCamelCase : Union[str, Any] = tokenizer.encode(A_ )
UpperCamelCase : Optional[int] = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[Any] = AlbertTokenizer(A_ , keep_accents=A_ )
UpperCamelCase : Optional[int] = tokenizer.tokenize("This is a test" )
self.assertListEqual(A_ , ["▁this", "▁is", "▁a", "▁test"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [48, 25, 21, 1289] )
UpperCamelCase : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
A_ , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."] )
UpperCamelCase : Optional[int] = tokenizer.convert_tokens_to_ids(A_ )
self.assertListEqual(A_ , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
UpperCamelCase : List[str] = tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(
A_ , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."] , )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[int] = AlbertTokenizer(A_ )
UpperCamelCase : Dict = tokenizer.encode("sequence builders" )
UpperCamelCase : Tuple = tokenizer.encode("multi-sequence build" )
UpperCamelCase : str = tokenizer.build_inputs_with_special_tokens(A_ )
UpperCamelCase : int = tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[Any] = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name="albert-base-v2" , revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e" , )
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[int]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Check all 8 neighbours of cell (i, j)
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
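# Quick usage example for the island counter above (grid values are arbitrary;
# note the 8-neighbourhood, so diagonal cells join the same island):
if __name__ == "__main__":
    grid = [
        [1, 1, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 0, 1],
    ]
    print(Graph(3, 4, grid).count_islands())  # expected output: 2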
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    # Project Euler 135: count how many n < limit admit exactly ten solutions
    # of x**2 - y**2 - z**2 = n with x, y, z a decreasing arithmetic progression.
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
            if (
                first_term > common_difference
                and first_term < 4 * common_difference
            ):  # since x, y, z are positive integers: z > 0 and a > d, also a < 4d
                frequency[n] += 1
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(f'{solution() = }')
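# Worked instance of the equation being counted (from the problem statement):
# for n = 27, x**2 - y**2 - z**2 = n has the two progressions (12, 9, 6) and
# (34, 27, 20); 1155 is the least n with exactly ten solutions.
assert 12**2 - 9**2 - 6**2 == 27 and 34**2 - 27**2 - 20**2 == 27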
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
__A = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class lowerCamelCase__ ( lowerCamelCase_ ):
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
self.check_model_type(SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case , snake_case : Optional[Any] = {}, {}
if padding is not None:
snake_case : Optional[Any] = padding
if truncation is not None:
snake_case : Union[str, Any] = truncation
if top_k is not None:
snake_case : str = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE , (Image.Image, str) ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
snake_case : Tuple = {"image": image, "question": question}
else:
snake_case : List[str] = image
snake_case : Optional[int] = super().__call__(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
return results
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
snake_case : List[Any] = load_image(inputs["image"] )
snake_case : Tuple = self.tokenizer(
inputs["question"] , return_tensors=self.framework , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE )
snake_case : Optional[int] = self.image_processor(images=SCREAMING_SNAKE_CASE , return_tensors=self.framework )
model_inputs.update(SCREAMING_SNAKE_CASE )
return model_inputs
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case : Optional[Any] = self.model(**SCREAMING_SNAKE_CASE )
return model_outputs
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
snake_case : List[Any] = self.model.config.num_labels
if self.framework == "pt":
snake_case : Optional[int] = model_outputs.logits.sigmoid()[0]
snake_case , snake_case : Any = probs.topk(SCREAMING_SNAKE_CASE )
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
snake_case : Optional[Any] = scores.tolist()
snake_case : List[Any] = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )]
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
snake_case_ = 'xlm-roberta'
def __init__( self : Tuple , snake_case : Optional[Any]=3_0522 , snake_case : Optional[Any]=768 , snake_case : Any=12 , snake_case : Optional[int]=12 , snake_case : Any=3072 , snake_case : Optional[Any]="gelu" , snake_case : List[str]=0.1 , snake_case : Dict=0.1 , snake_case : Tuple=512 , snake_case : Union[str, Any]=2 , snake_case : Any=0.02 , snake_case : Union[str, Any]=1e-12 , snake_case : Any=1 , snake_case : Optional[int]=0 , snake_case : Optional[int]=2 , snake_case : int="absolute" , snake_case : Dict=True , snake_case : str=None , **snake_case : Dict , ):
'''simple docstring'''
super().__init__(pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , **snake_case )
A__ : Optional[int] = vocab_size
A__ : Optional[Any] = hidden_size
A__ : Optional[Any] = num_hidden_layers
A__ : List[str] = num_attention_heads
A__ : Union[str, Any] = hidden_act
A__ : Any = intermediate_size
A__ : Dict = hidden_dropout_prob
A__ : Any = attention_probs_dropout_prob
A__ : Tuple = max_position_embeddings
A__ : List[str] = type_vocab_size
A__ : Optional[int] = initializer_range
A__ : Any = layer_norm_eps
A__ : List[str] = position_embedding_type
A__ : str = use_cache
A__ : int = classifier_dropout
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
@property
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
A__ : int = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A__ : List[str] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
A_ = logging.get_logger(__name__)
A_ = Dict[str, Any]
A_ = List[Prediction]
@add_end_docstrings(UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
def __init__( self : str , *snake_case : Tuple , **snake_case : Tuple ):
'''simple docstring'''
super().__init__(*snake_case , **snake_case )
if self.framework == "tf":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
requires_backends(self , """vision""" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def _UpperCamelCase ( self : List[Any] , **snake_case : Optional[int] ):
'''simple docstring'''
A__ : Dict = {}
if "threshold" in kwargs:
A__ : int = kwargs["""threshold"""]
return {}, {}, postprocess_kwargs
def __call__( self : Tuple , *snake_case : Union[str, Any] , **snake_case : Union[str, Any] ):
'''simple docstring'''
return super().__call__(*snake_case , **snake_case )
def _UpperCamelCase ( self : str , snake_case : int ):
'''simple docstring'''
A__ : List[str] = load_image(snake_case )
A__ : int = torch.IntTensor([[image.height, image.width]] )
A__ : Union[str, Any] = self.image_processor(images=[image] , return_tensors="""pt""" )
if self.tokenizer is not None:
A__ : str = self.tokenizer(text=inputs["""words"""] , boxes=inputs["""boxes"""] , return_tensors="""pt""" )
A__ : List[str] = target_size
return inputs
    def _forward( self , model_inputs ):
        '''simple docstring'''
        target_size = model_inputs.pop("""target_size""" )
        outputs = self.model(**model_inputs )
        model_outputs = outputs.__class__({"""target_size""": target_size, **outputs} )
        if self.tokenizer is not None:
            model_outputs["""bbox"""] = model_inputs["""bbox"""]
        return model_outputs
    def postprocess( self , model_outputs , threshold=0.9 ):
        '''simple docstring'''
        target_size = model_outputs["""target_size"""]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height , width = target_size[0].tolist()
            def unnormalize(bbox ):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ] ) )
            scores , classes = model_outputs["""logits"""].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox ) for bbox in model_outputs["""bbox"""].squeeze(0 )]
            keys = ["""score""", """label""", """box"""]
            annotation = [dict(zip(keys , vals ) ) for vals in zip(scores.tolist() , labels , boxes ) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs , threshold , target_size )
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["""scores"""]
            labels = raw_annotation["""labels"""]
            boxes = raw_annotation["""boxes"""]
            raw_annotation["""scores"""] = scores.tolist()
            raw_annotation["""labels"""] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["""boxes"""] = [self._get_bounding_box(box ) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["""score""", """label""", """box"""]
            annotation = [
                dict(zip(keys , vals ) )
                for vals in zip(raw_annotation["""scores"""] , raw_annotation["""labels"""] , raw_annotation["""boxes"""] )
            ]
        return annotation
def _UpperCamelCase ( self : Union[str, Any] , snake_case : "torch.Tensor" ):
'''simple docstring'''
if self.framework != "pt":
raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""" )
A__ , A__ , A__ , A__ : Any = box.int().tolist()
A__ : Any = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
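# Minimal usage sketch (the `pipeline` factory and the DETR checkpoint are real
# transformers entry points; the scores/boxes shown are illustrative):
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
#   # -> [{"score": 0.99, "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]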
| 296
| 0
|
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__snake_case : int = 'true'
def get_basic_setup( accelerator ,num_samples=82 ,batch_size=16 ):
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=num_samples )
    dataloader = DataLoader(dset ,batch_size=batch_size )
    model.to(accelerator.device )
    ddp_model, dataloader = accelerator.prepare(model ,dataloader )
    return model, ddp_model, dataloader
def get_dataloader( accelerator ,use_longest=False ):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" )
    dataset = load_dataset("glue" ,"mrpc" ,split="validation" )
    def tokenize_function(examples ):
        outputs = tokenizer(examples["sentence1"] ,examples["sentence2"] ,truncation=True ,max_length=None )
        return outputs
    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function ,batched=True ,remove_columns=["idx", "sentence1", "sentence2"] ,)
        tokenized_datasets = tokenized_datasets.rename_column("label" ,"labels" )
    def collate_fn(examples ):
        if use_longest:
            return tokenizer.pad(examples ,padding="longest" ,return_tensors="pt" )
        return tokenizer.pad(examples ,padding="max_length" ,max_length=128 ,return_tensors="pt" )
    return DataLoader(tokenized_datasets ,shuffle=False ,collate_fn=collate_fn ,batch_size=16 )
def get_mrpc_setup( dispatch_batches ,split_batches ):
    accelerator = Accelerator(dispatch_batches=dispatch_batches ,split_batches=split_batches )
    dataloader = get_dataloader(accelerator ,not dispatch_batches )
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased" ,return_dict=True )
    ddp_model, ddp_dataloader = accelerator.prepare(model ,dataloader )
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions( model ,dataloader ,accelerator ):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input )
        logit, target = accelerator.gather_for_metrics((logit, target) )
        logits_and_targets.append((logit, target) )
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit )
        targs.append(targ )
    logits, targs = torch.cat(logits ), torch.cat(targs )
    return logits, targs
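# Note: `gather_for_metrics` trims the duplicate samples that `prepare` adds to
# pad the last batch across processes, so the concatenated logits/targets match
# the true dataset length; a plain `accelerator.gather` would keep the padding.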
def test_torch_metrics( accelerator ,num_samples=82 ,dispatch_batches=False ,split_batches=False ,batch_size=16 ):
    model, ddp_model, dataloader = get_basic_setup(accelerator ,num_samples ,batch_size )
    logits, _ = generate_predictions(ddp_model ,dataloader ,accelerator )
    assert (
        len(logits ) == num_samples
    ), F"""Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits )}"""
def test_mrpc( dispatch_batches = False ,split_batches = False ):
    metric = evaluate.load("glue" ,"mrpc" )
    setup, accelerator = get_mrpc_setup(dispatch_batches ,split_batches )
    # First do baseline
    model, dataloader, device = setup['no']
    model.to(device )
    model.eval()
    for batch in dataloader:
        batch.to(device )
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        metric.add_batch(predictions=preds ,references=batch["labels"] )
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup['ddp']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        references = batch['labels']
        preds, references = accelerator.gather_for_metrics((preds, references) )
        metric.add_batch(predictions=preds ,references=references )
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key] ,distributed[key] ), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def main( ):
    accelerator = Accelerator(split_batches=False ,dispatch_batches=False )
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**" )
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
                test_mrpc(dispatch_batches ,split_batches )
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**" )
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches ,dispatch_batches=dispatch_batches )
            if accelerator.is_local_main_process:
                print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
            test_torch_metrics(accelerator ,99 )
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**" )
    accelerator = Accelerator()
    test_torch_metrics(accelerator ,512 )
    accelerator.state._reset_state()
def _mp_fn( index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
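# This script is normally exercised with `accelerate launch <path-to-this-file>`
# on one or more processes, or via xla_spawn on TPU, which calls _mp_fn above.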
| 269
|
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class __magic_name__ ( _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : Optional[int] = BarthezTokenizer
lowerCAmelCase : int = BarthezTokenizerFast
lowerCAmelCase : Dict = True
lowerCAmelCase : str = True
def __lowercase ( self : List[Any] ):
super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname ,legacy_format=False )
        self.tokenizer = tokenizer
def __lowercase ( self : Tuple ):
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) ,token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) ,token )
def __lowercase ( self : str ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'<s>' )
self.assertEqual(vocab_keys[1] ,'<pad>' )
self.assertEqual(vocab_keys[-1] ,'<mask>' )
self.assertEqual(len(_UpperCAmelCase ) ,101122 )
def __lowercase ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size ,101122 )
@require_torch
def __lowercase ( self : Dict ):
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]
        batch = self.tokenizer(
            src_text ,max_length=len(expected_src_tokens ) ,padding=True ,truncation=True ,return_tensors='pt' )
        self.assertIsInstance(batch ,BatchEncoding )
        self.assertEqual((2, 6) ,batch.input_ids.shape )
        self.assertEqual((2, 6) ,batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens ,result )
def __lowercase ( self : Optional[Any] ):
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens ,rust_tokens )
        ids = tokenizer.encode(sequence ,add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence ,add_special_tokens=False )
        self.assertListEqual(ids ,rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids ,rust_ids )
@slow
def __lowercase ( self : Optional[int] ):
# fmt: off
_a : Optional[int] = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
_a : Optional[Any] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase ,model_name='moussaKam/mbarthez' ,revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' ,sequences=_UpperCAmelCase ,)
| 89
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase__ : int = {
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : str = ['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Union[str, Any] = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
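# Sketch of the lazy-import behaviour this sets up: importing the package stays
# cheap, and attribute access such as `BloomModel` triggers the real
# torch-backed import only on first use.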
| 353
|
'''simple docstring'''
def selection_sort( collection ):
    length = len(collection )
    for i in range(length - 1 ):
        least = i
        for k in range(i + 1 , length ):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[i] , collection[least] = (collection[least], collection[i])
    return collection
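# Example: selection_sort([5, 2, 9, 1]) returns [1, 2, 5, 9]; the algorithm does
# O(n^2) comparisons but at most n - 1 swaps.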
if __name__ == "__main__":
UpperCamelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase__ = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
| 299
| 0
|
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray( arr: Sequence[float] , low: int , high: int ) -> tuple[int | None, int | None, float]:
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low , left_high , left_sum = max_subarray(arr , low , mid )
    right_low , right_high , right_sum = max_subarray(arr , mid + 1 , high )
    cross_left , cross_right , cross_sum = max_cross_sum(arr , low , mid , high )
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum
def max_cross_sum( arr: Sequence[float] , low: int , mid: int , high: int ) -> tuple[int, int, float]:
    left_sum , max_left = float("""-inf""" ), -1
    right_sum , max_right = float("""-inf""" ), -1
    summ: int | float = 0
    for i in range(mid , low - 1 , -1 ):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1 , high + 1 ):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)
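# Divide-and-conquer recurrence: T(n) = 2*T(n/2) + O(n), i.e. O(n log n) overall.
# Example: max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4], 0, 8) == (3, 6, 6),
# i.e. the slice [4, -1, 2, 1].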
def time_max_subarray( input_size: int ) -> float:
    arr = [randint(1 , input_size ) for _ in range(input_size )]
    start = time.time()
    max_subarray(arr , 0 , input_size - 1 )
    end = time.time()
    return end - start
def plot_runtimes( ) -> None:
    input_sizes = [10, 100, 1_000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000]
    runtimes = [time_max_subarray(input_size ) for input_size in input_sizes]
    print("""No of Inputs\t\tTime Taken""" )
    for input_size, runtime in zip(input_sizes , runtimes ):
        print(input_size , """\t\t""" , runtime )
    plt.plot(input_sizes , runtimes )
    plt.xlabel("""Number of Inputs""" )
    plt.ylabel("""Time taken in seconds""" )
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 317
|
from ...processing_utils import ProcessorMixin
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : int = ["""image_processor""", """feature_extractor"""]
snake_case_ : List[Any] = """TvltImageProcessor"""
snake_case_ : Dict = """TvltFeatureExtractor"""
    def __init__( self , image_processor , feature_extractor) -> None:
        """simple docstring"""
        super().__init__(image_processor=image_processor , feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor
    def __call__( self , images=None , audio=None , images_mixed=None , sampling_rate=None , mask_audio=False , mask_pixel=False , *args , **kwargs , ) -> Any:
        """simple docstring"""
        if images is None and audio is None:
            raise ValueError("""You need to specify either an `images` or `audio` input to process.""")
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images , mask_pixel=mask_pixel , *args , **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed , is_mixed=True , *args , **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio , *args , sampling_rate=sampling_rate , mask_audio=mask_audio , **kwargs)
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict
@property
    def model_input_names( self ) -> Any:
        """simple docstring"""
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
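# Minimal usage sketch (the checkpoint id and the input shapes are assumptions
# made for illustration):
#   processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
#   inputs = processor(images=video_frames, audio=waveform, sampling_rate=44100)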
| 317
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCamelCase_ ( UpperCamelCase__ , unittest.TestCase ):
lowerCAmelCase__ = KandinskyVaaInpaintPipeline
lowerCAmelCase__ = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
lowerCAmelCase__ = [
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
lowerCAmelCase__ = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCAmelCase__ = False
@property
def lowercase_ ( self : str ):
'''simple docstring'''
return 32
@property
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
return 32
@property
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
return self.time_input_dim
@property
def lowercase_ ( self : int ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
return 100
@property
def lowercase_ ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : List[str] = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
UpperCAmelCase__ : Dict = UNetaDConditionModel(**__lowerCamelCase )
return model
@property
def lowercase_ ( self : int ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : str = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.dummy_unet
UpperCAmelCase__ : Dict = self.dummy_movq
UpperCAmelCase__ : Optional[Any] = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule='''linear''' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , steps_offset=1 , prediction_type='''epsilon''' , thresholding=__lowerCamelCase , )
UpperCAmelCase__ : Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def lowercase_ ( self : Union[str, Any] , _A : int , _A : Optional[int]=0 ):
'''simple docstring'''
UpperCAmelCase__ : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
UpperCAmelCase__ : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__lowerCamelCase )
# create init_image
UpperCAmelCase__ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase__ : Dict = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
# create mask
UpperCAmelCase__ : int = np.ones((64, 64) , dtype=np.floataa )
        mask[:32, :32] = 0
if str(__lowerCamelCase ).startswith('''mps''' ):
UpperCAmelCase__ : str = torch.manual_seed(__lowerCamelCase )
else:
UpperCAmelCase__ : int = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
UpperCAmelCase__ : Any = {
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = '''cpu'''
UpperCAmelCase__ : Tuple = self.get_dummy_components()
UpperCAmelCase__ : Any = self.pipeline_class(**__lowerCamelCase )
UpperCAmelCase__ : Tuple = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCAmelCase__ : List[str] = pipe(**self.get_dummy_inputs(__lowerCamelCase ) )
UpperCAmelCase__ : int = output.images
UpperCAmelCase__ : Dict = pipe(
**self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0]
UpperCAmelCase__ : Optional[int] = image[0, -3:, -3:, -1]
UpperCAmelCase__ : List[str] = image_from_tuple[0, -3:, -3:, -1]
print(f"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ : str = np.array(
[0.5_0_7_7_5_9_0_3, 0.4_9_5_2_7_1_9_5, 0.4_8_8_2_4_5_4_3, 0.5_0_1_9_2_2_3_7, 0.4_8_6_4_4_9_0_6, 0.4_9_3_7_3_8_1_4, 0.4_7_8_0_5_9_8, 0.4_7_2_3_4_8_2_7, 0.4_8_3_2_7_8_4_8] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def lowercase_ ( self : Tuple ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy''' )
UpperCAmelCase__ : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
UpperCAmelCase__ : Optional[int] = np.ones((768, 768) , dtype=np.floataa )
        mask[:250, 250:-250] = 0
UpperCAmelCase__ : str = '''a hat'''
UpperCAmelCase__ : str = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(__lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = KandinskyVaaInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder-inpaint''' , torch_dtype=torch.floataa )
UpperCAmelCase__ : Dict = pipeline.to(__lowerCamelCase )
pipeline.set_progress_bar_config(disable=__lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = pipe_prior(
__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
UpperCAmelCase__ : Dict = pipeline(
image=__lowerCamelCase , mask_image=__lowerCamelCase , image_embeds=__lowerCamelCase , negative_image_embeds=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , output_type='''np''' , )
UpperCAmelCase__ : Optional[int] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
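# The slow test chains both stages end to end: the prior pipeline maps the text
# prompt to image embeddings, and the decoder pipeline inpaints the masked
# region of the 768x768 image before comparing against a reference rendering.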
| 367
|
'''simple docstring'''
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function( z ):
    return 1 / (1 + np.exp(-z ))
def cost_function( h , y ):
    return (-y * np.log(h ) - (1 - y) * np.log(1 - h )).mean()
def log_likelihood( x , y , weights ):
    scores = np.dot(x , weights )
    return np.sum(y * scores - np.log(1 + np.exp(scores ) ) )
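# Note: cost_function is the mean binary cross-entropy, while log_likelihood is
# its unnormalised, sign-flipped counterpart; gradient descent on one is
# gradient ascent on the other.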
def logistic_reg( alpha , x , y , max_iterations=70_000 ):
    theta = np.zeros(x.shape[1] )
    for iterations in range(max_iterations ):
        z = np.dot(x , theta )
        h = sigmoid_function(z )
        gradient = np.dot(x.T , h - y ) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x , theta )
        h = sigmoid_function(z )
        j = cost_function(h , y )
        if iterations % 100 == 0:
            print(F"""loss: {j} \t""" )  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
print('''theta: ''', theta) # printing the theta i.e our weights vector
    def predict_prob( x ):
        return sigmoid_function(
            np.dot(x , theta ) )  # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(1_0, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
| 299
| 0
|
def fibonacci( n: int ):
    '''simple docstring'''
    if n == 1 or not isinstance(n , int ):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]
def fibonacci_digits_index( n: int ):
    '''simple docstring'''
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
def solution( n: int = 1_000 ):
    '''simple docstring'''
    return fibonacci_digits_index(n )
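# solution(1_000) answers Project Euler problem 25: the index of the first
# Fibonacci term to reach 1000 digits (known answer: 4782).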
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 140
|
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
_UpperCAmelCase = """scheduler_config.json"""
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = 1
lowerCamelCase_ = 2
lowerCamelCase_ = 3
lowerCamelCase_ = 4
lowerCamelCase_ = 5
lowerCamelCase_ = 6
lowerCamelCase_ = 7
lowerCamelCase_ = 8
lowerCamelCase_ = 9
lowerCamelCase_ = 1_0
lowerCamelCase_ = 1_1
lowerCamelCase_ = 1_2
lowerCamelCase_ = 1_3
lowerCamelCase_ = 1_4
@dataclass
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = 42
class UpperCAmelCase :
'''simple docstring'''
lowerCamelCase_ = SCHEDULER_CONFIG_NAME
lowerCamelCase_ = []
lowerCamelCase_ = True
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path = None , subfolder = None , return_unused_kwargs=False , **kwargs , ):
        """simple docstring"""
        config , kwargs , commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path , subfolder=subfolder , return_unused_kwargs=True , return_commit_hash=True , **kwargs , )
        return cls.from_config(config , return_unused_kwargs=return_unused_kwargs , **kwargs )
    def save_pretrained( self , save_directory , push_to_hub = False , **kwargs ):
        """simple docstring"""
        self.save_config(save_directory=save_directory , push_to_hub=push_to_hub , **kwargs )
@property
    def compatibles( self ):
        """simple docstring"""
        return self._get_compatibles()
@classmethod
    def _get_compatibles( cls ):
        """simple docstring"""
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles ) )
        diffusers_library = importlib.import_module(__name__.split('.' )[0] )
        compatible_classes = [
            getattr(diffusers_library , c ) for c in compatible_classes_str if hasattr(diffusers_library , c )
        ]
        return compatible_classes
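# This enables in-place scheduler swaps, e.g. (sketch of standard diffusers usage):
#   pipe.scheduler = OtherScheduler.from_config(pipe.scheduler.config)
# where OtherScheduler is any entry of `pipe.scheduler.compatibles`.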
| 140
| 1
|
from manim import *
class lowerCamelCase__ ( __lowercase):
'''simple docstring'''
def _lowerCamelCase ( self :str ) -> Optional[int]:
__UpperCamelCase : Tuple = Rectangle(height=0.5 , width=0.5 )
__UpperCamelCase : Optional[int] = Rectangle(height=0.25 , width=0.25 )
__UpperCamelCase : Optional[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__UpperCamelCase : Any = [mem.copy() for i in range(6 )]
__UpperCamelCase : Optional[int] = [mem.copy() for i in range(6 )]
__UpperCamelCase : List[Any] = VGroup(*a ).arrange(a , buff=0 )
__UpperCamelCase : Optional[int] = VGroup(*a ).arrange(a , buff=0 )
__UpperCamelCase : List[str] = VGroup(a , a ).arrange(a , buff=0 )
__UpperCamelCase : Optional[Any] = Text("CPU" , font_size=2_4 )
__UpperCamelCase : Optional[int] = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(a )
__UpperCamelCase : Tuple = [mem.copy() for i in range(4 )]
__UpperCamelCase : Dict = VGroup(*a ).arrange(a , buff=0 )
__UpperCamelCase : Tuple = Text("GPU" , font_size=2_4 )
__UpperCamelCase : Optional[int] = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
gpu.move_to([-1, -1, 0] )
self.add(a )
__UpperCamelCase : Any = [mem.copy() for i in range(6 )]
__UpperCamelCase : Any = VGroup(*a ).arrange(a , buff=0 )
__UpperCamelCase : Dict = Text("Model" , font_size=2_4 )
__UpperCamelCase : List[str] = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
model.move_to([3, -1.0, 0] )
self.add(a )
__UpperCamelCase : List[str] = []
__UpperCamelCase : Union[str, Any] = []
__UpperCamelCase : Optional[Any] = []
for i, rect in enumerate(a ):
rect.set_stroke(a )
__UpperCamelCase : Union[str, Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=a , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=a , buff=0.0 )
self.add(a )
model_cpu_arr.append(a )
self.add(*a , *a , *a )
__UpperCamelCase : Any = [mem.copy() for i in range(6 )]
__UpperCamelCase : Tuple = VGroup(*a ).arrange(a , buff=0 )
__UpperCamelCase : Tuple = Text("Loaded Checkpoint" , font_size=2_4 )
__UpperCamelCase : Tuple = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
checkpoint.move_to([3, 0.5, 0] )
self.add(a )
__UpperCamelCase : List[str] = []
__UpperCamelCase : str = []
for i, rect in enumerate(a ):
__UpperCamelCase : Dict = fill.copy().set_fill(a , opacity=0.7 )
target.move_to(a )
ckpt_arr.append(a )
__UpperCamelCase : Any = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(a )
self.add(*a , *a )
__UpperCamelCase : List[str] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__UpperCamelCase : Any = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(a , a )
__UpperCamelCase : str = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=1_8 , )
blue_text.next_to(a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(a )
__UpperCamelCase : Optional[Any] = MarkupText(
f'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
__UpperCamelCase : Tuple = [meta_mem.copy() for i in range(6 )]
__UpperCamelCase : Tuple = [meta_mem.copy() for i in range(6 )]
__UpperCamelCase : Dict = VGroup(*a ).arrange(a , buff=0 )
__UpperCamelCase : Dict = VGroup(*a ).arrange(a , buff=0 )
__UpperCamelCase : List[str] = VGroup(a , a ).arrange(a , buff=0 )
__UpperCamelCase : int = Text("Disk" , font_size=2_4 )
__UpperCamelCase : str = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(a , run_time=3 ) , Write(a , run_time=1 ) , Create(a , run_time=1 ) )
__UpperCamelCase : str = []
for i, rect in enumerate(a ):
__UpperCamelCase : Tuple = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(a , run_time=1.5 ) )
self.play(*a )
self.play(FadeOut(a ) )
__UpperCamelCase : Tuple = MarkupText(f'Then, the checkpoint is removed from memory\nthrough garbage collection.' , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(a , run_time=3 ) )
self.play(
FadeOut(a , a , *a , *a ) , )
self.wait()
| 151
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
lowercase : Any = logging.get_logger(__name__)
lowercase : Any = {'vocab_file': 'spiece.model'}
lowercase : int = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class lowerCamelCase__ ( __lowercase):
'''simple docstring'''
def __init__( self :int , a :List[Any] , a :Optional[Any]=False , a :List[str]=True , a :str=False , a :Optional[Any]="<s>" , a :Tuple="</s>" , a :int="<unk>" , a :Optional[Any]="<sep>" , a :List[str]="<pad>" , a :Any="<cls>" , a :List[Any]="<mask>" , a :Optional[Any]=["<eop>", "<eod>"] , a :Optional[Dict[str, Any]] = None , **a :List[str] , ) -> None:
__UpperCamelCase : Any = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
__UpperCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=a , remove_space=a , keep_accents=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , additional_special_tokens=a , sp_model_kwargs=self.sp_model_kwargs , **a , )
__UpperCamelCase : int = 3
__UpperCamelCase : Union[str, Any] = do_lower_case
__UpperCamelCase : str = remove_space
__UpperCamelCase : int = keep_accents
__UpperCamelCase : Optional[int] = vocab_file
__UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
"See https://pypi.org/project/jieba/ for installation." )
__UpperCamelCase : Optional[Any] = jieba
__UpperCamelCase : Optional[int] = str.maketrans(" \n" , "\u2582\u2583" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _lowerCamelCase ( self :Optional[int] ) -> List[str]:
return len(self.sp_model )
def _lowerCamelCase ( self :Dict ) -> str:
__UpperCamelCase : Optional[int] = {self.convert_ids_to_tokens(a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self :Optional[int] ) -> int:
__UpperCamelCase : Tuple = self.__dict__.copy()
__UpperCamelCase : Optional[Any] = None
return state
def __setstate__( self :Optional[int] , a :Dict ) -> str:
__UpperCamelCase : Optional[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__UpperCamelCase : Union[str, Any] = {}
__UpperCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCamelCase ( self :List[Any] , a :str ) -> int:
if self.remove_space:
__UpperCamelCase : int = " ".join(inputs.strip().split() )
else:
__UpperCamelCase : Union[str, Any] = inputs
__UpperCamelCase : List[str] = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
__UpperCamelCase : Tuple = unicodedata.normalize("NFKD" , a )
__UpperCamelCase : Optional[Any] = "".join([c for c in outputs if not unicodedata.combining(a )] )
if self.do_lower_case:
__UpperCamelCase : Any = outputs.lower()
return outputs
def _lowerCamelCase ( self :Tuple , a :str ) -> List[str]:
__UpperCamelCase : List[Any] = self.preprocess_text(a )
__UpperCamelCase : int = self.sp_model.encode(a , out_type=a )
__UpperCamelCase : Optional[Any] = []
for piece in pieces:
if len(a ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
__UpperCamelCase : str = self.sp_model.EncodeAsPieces(piece[:-1].replace(a , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__UpperCamelCase : List[str] = cur_pieces[1:]
else:
__UpperCamelCase : int = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(a )
else:
new_pieces.append(a )
return new_pieces
def _lowerCamelCase ( self :str , a :Dict ) -> List[str]:
return self.sp_model.PieceToId(a )
def _lowerCamelCase ( self :Tuple , a :int ) -> Tuple:
return self.sp_model.IdToPiece(a )
def _lowerCamelCase ( self :Union[str, Any] , a :Union[str, Any] ) -> List[Any]:
__UpperCamelCase : str = "".join(a ).replace(a , " " ).strip()
return out_string
def _lowerCamelCase ( self :Any , a :List[int] , a :Optional[List[int]] = None ) -> List[int]:
__UpperCamelCase : Tuple = [self.sep_token_id]
__UpperCamelCase : int = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCamelCase ( self :Any , a :List[int] , a :Optional[List[int]] = None , a :bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
if token_ids_a is not None:
return ([0] * len(a )) + [1] + ([0] * len(a )) + [1, 1]
return ([0] * len(a )) + [1, 1]
def _lowerCamelCase ( self :Dict , a :List[int] , a :Optional[List[int]] = None ) -> List[int]:
__UpperCamelCase : Optional[int] = [self.sep_token_id]
__UpperCamelCase : Dict = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowerCamelCase ( self :Union[str, Any] , a :str , a :Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__UpperCamelCase : Tuple = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a )
elif not os.path.isfile(self.vocab_file ):
with open(a , "wb" ) as fi:
__UpperCamelCase : List[Any] = self.sp_model.serialized_model_proto()
fi.write(a )
return (out_vocab_file,)
def _lowerCamelCase ( self :str , *a :str , **a :Any ) -> Tuple:
__UpperCamelCase : int = super()._decode(*a , **a )
__UpperCamelCase : int = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
return text
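# The " " -> "\u2582" and "\n" -> "\u2583" translation table lets spaces and
# newlines round-trip through the sentencepiece vocabulary; `_decode` above
# reverses the mapping.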
| 151
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE__ ( lowercase , unittest.TestCase ):
"""simple docstring"""
a : Union[str, Any] =DebertaTokenizer
a : List[str] =True
a : Optional[Any] =DebertaTokenizerFast
def lowercase__ ( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase : List[str] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"[UNK]",
]
lowerCAmelCase : Optional[int] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
lowerCAmelCase : List[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowerCAmelCase : List[str] = {"unk_token": "[UNK]"}
lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case__ ) )
def lowercase__ ( self , **snake_case__ ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = "lower newer"
lowerCAmelCase : Any = "lower newer"
return input_text, output_text
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = self.get_tokenizer()
lowerCAmelCase : Tuple = "lower newer"
lowerCAmelCase : List[Any] = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
lowerCAmelCase : Optional[Any] = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
lowerCAmelCase : Dict = tokens + [tokenizer.unk_token]
lowerCAmelCase : str = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = self.get_tokenizer()
lowerCAmelCase : Optional[int] = tokenizer("Hello" , "World" )
lowerCAmelCase : Union[str, Any] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd["token_type_ids"] , snake_case__ )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Any = self.tokenizer_class.from_pretrained("microsoft/deberta-base" )
lowerCAmelCase : Tuple = tokenizer.encode("sequence builders" , add_special_tokens=snake_case__ )
lowerCAmelCase : Dict = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case__ )
lowerCAmelCase : List[str] = tokenizer.encode(
"sequence builders" , add_special_tokens=snake_case__ , add_prefix_space=snake_case__ )
lowerCAmelCase : List[Any] = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=snake_case__ , add_prefix_space=snake_case__ )
lowerCAmelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(snake_case__ )
lowerCAmelCase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
lowerCAmelCase : Tuple = tokenizer_class.from_pretrained("microsoft/deberta-base" )
lowerCAmelCase : Tuple = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
lowerCAmelCase : List[Any] = tokenizer(snake_case__ , padding=snake_case__ )
lowerCAmelCase : List[Any] = [tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ ) for seq in encoding["input_ids"]]
# fmt: off
lowerCAmelCase : List[str] = {
"input_ids": [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
"token_type_ids": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
lowerCAmelCase : int = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
self.assertDictEqual(encoding.data , snake_case__ )
for expected, decoded in zip(snake_case__ , snake_case__ ):
self.assertEqual(snake_case__ , snake_case__ )
| 108
|
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__)
@dataclass
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Optional[float] = field(
default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} )
__snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Whether to SortishSamler or not."} )
__snake_case : bool = field(
default=lowerCAmelCase_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
__snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "whether to use adafactor"} )
__snake_case : Optional[float] = field(
default=lowerCAmelCase_ , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} )
__snake_case : Optional[float] = field(
default=lowerCAmelCase_ , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} )
__snake_case : Optional[float] = field(default=lowerCAmelCase_ , metadata={"help": "Dropout probability. Goes into model.config."} )
__snake_case : Optional[float] = field(
default=lowerCAmelCase_ , metadata={"help": "Attention dropout probability. Goes into model.config."} )
__snake_case : Optional[str] = field(
default="linear" , metadata={"help": F"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} , )
| 296
| 0
|
def solution():
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
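# Project Euler problem 19: Sundays falling on the first of the month during
# the twentieth century, 1901-2000 (known answer: 171).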
if __name__ == "__main__":
print(solution())
| 352
|
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
__magic_name__ = logging.get_logger(__name__)
class lowercase ( A__ ):
'''simple docstring'''
def __init__( self , *_snake_case , **_snake_case ) -> None:
"""simple docstring"""
warnings.warn(
'''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use PoolFormerImageProcessor instead.''' , _snake_case , )
super().__init__(*_snake_case , **_snake_case )
| 152
| 0
|
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
lowerCamelCase : Union[str, Any] = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase : List[Any] = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
lowerCamelCase : Tuple = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
lowerCamelCase : Dict = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
lowerCamelCase : Optional[Any] = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
lowerCamelCase : List[str] = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split(identifier):
    """Split a CamelCased `identifier` into its constituent words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
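# Editor's illustrative check: the regex splits on lower-to-upper case
# transitions and on acronym boundaries, e.g.
#   camel_case_split("TFBertForSequenceClassification")
#   -> ['TF', 'Bert', 'For', 'Sequence', 'Classification']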
def get_frameworks_table():
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table
def update_metadata(token, commit_sha):
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
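# Example invocations (editor's note; run from the repository root):
#   python utils/update_metadata.py --check-only
#   python utils/update_metadata.py --token <hf_token> --commit_sha <sha>
# The <hf_token> and <sha> placeholders are illustrative.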
| 124
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByTaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByTaTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        tokenizer = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def ta_base_tokenizer(self) -> ByTaTokenizer:
        return ByTaTokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByTaTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.ta_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])
    def test_multibyte_chars(self):
        tokenizer = self.ta_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
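    # Editor's note on the ids above: ByT5-style byte tokenizers map each UTF-8
    # byte b to id b + 3 (offsetting for the pad/eos/unk special tokens) and
    # append eos (id 1), which is easy to verify by hand:
    #   [b + 3 for b in "Unicode €.".encode("utf-8")] + [1]
    #   == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]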
    def test_prepare_batch_integration(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.ta_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_eos_in_input(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == "")
    # tokenizer can be instantiated without any pretrained files, so no pretrained list to test
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
| 299
| 0
|
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Calculate the Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    """For each vector in `value_array`, find its nearest neighbour in `dataset`."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Calculate the cosine similarity between two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
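    # Editor's usage sketch: nearest-neighbour lookup on a toy dataset.
    sample_dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    sample_queries = np.array([[0.9, 1.1]])
    print(similarity_search(sample_dataset, sample_queries))  # [[[1.0, 1.0], 0.14142...]]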
| 338
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
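# Editor's usage sketch: the config exposes canonical names via `attribute_map`,
# so `hidden_size` resolves to `d_model` (the value below is illustrative):
#
#   config = WhisperConfig(d_model=384)
#   assert config.hidden_size == 384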
| 338
| 1
|
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 32
|
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """Minimum cost of 1-, 7- and 30-day travel passes covering every day in `days`."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
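    # Editor's usage sketch (the classic "Minimum Cost For Tickets" example):
    # one-day passes for days 1 and 20 plus a seven-day pass covering days 4-8.
    print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11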
| 299
| 0
|
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            # Five identical inputs -> five prediction lists of three labels each.
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
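# Editor's usage sketch of the pipeline exercised above (downloads the public
# openai/clip-vit-base-patch32 checkpoint on first run; the image path is
# illustrative):
#
#   from transformers import pipeline
#
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   predictions = classifier("cats.png", candidate_labels=["cat", "plane", "remote"])
#   # -> list of {"score": float, "label": str}, sorted by descending score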
| 13
|
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMvaModel(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)

                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]

                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(
                config, input_ids, bbox, pixel_values, token_type_ids, input_mask
            )
    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )
    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )

        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 13
| 1
|
'''simple docstring'''
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase_ : Tuple ) -> int:
UpperCAmelCase : List[str] = parent
def UpperCAmelCase_ ( self : int ) -> List[Any]:
return {}
def get_html_strings( ):
UpperCAmelCase : Optional[Any] = '<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR="FFFFFF">\n <HR>\n <a href="http://google.com">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style="color:#0000FF">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>'
UpperCAmelCase : List[str] = '\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n '
return [html_string_a, html_string_a]
@require_bsa
class A_ ( FeatureExtractionSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = MarkupLMFeatureExtractor if is_bsa_available() else None
def UpperCAmelCase_ ( self : List[str] ) -> Any:
UpperCAmelCase : Dict = MarkupLMFeatureExtractionTester(self )
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> int:
return self.feature_extract_tester.prepare_feat_extract_dict()
def UpperCAmelCase_ ( self : int ) -> Optional[int]:
# Initialize feature_extractor
UpperCAmelCase : List[str] = self.feature_extraction_class()
# Test not batched input
UpperCAmelCase : int = get_html_strings()[0]
UpperCAmelCase : Optional[Any] = feature_extractor(lowercase_ )
# fmt: off
UpperCAmelCase : str = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
UpperCAmelCase : Optional[Any] = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
# fmt: on
self.assertEqual(encoding.nodes , lowercase_ )
self.assertEqual(encoding.xpaths , lowercase_ )
# Test batched
UpperCAmelCase : List[Any] = get_html_strings()
UpperCAmelCase : List[str] = feature_extractor(lowercase_ )
# fmt: off
UpperCAmelCase : Any = expected_nodes + [['My First Heading', 'My first paragraph.']]
UpperCAmelCase : str = expected_xpaths + [['/html/body/h1', '/html/body/p']]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , lowercase_ )
self.assertEqual(encoding.xpaths , lowercase_ )
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester :
'''simple docstring'''
def __init__( self : Any , lowercase_ : List[Any] , lowercase_ : int=3 , lowercase_ : Dict=32 , lowercase_ : Optional[Any]=3 , lowercase_ : Tuple=10 , lowercase_ : Optional[Any]=[10, 20, 30, 40] , lowercase_ : List[str]=[1, 1, 2, 1] , lowercase_ : Optional[int]=True , lowercase_ : str=True , lowercase_ : Dict="relu" , lowercase_ : Optional[Any]=3 , lowercase_ : List[str]=None , ) -> int:
UpperCAmelCase : Dict = parent
UpperCAmelCase : Dict = batch_size
UpperCAmelCase : Any = image_size
UpperCAmelCase : Any = num_channels
UpperCAmelCase : List[str] = embeddings_size
UpperCAmelCase : str = hidden_sizes
UpperCAmelCase : str = depths
UpperCAmelCase : Optional[int] = is_training
UpperCAmelCase : int = use_labels
UpperCAmelCase : Optional[int] = hidden_act
UpperCAmelCase : List[Any] = num_labels
UpperCAmelCase : Union[str, Any] = scope
UpperCAmelCase : Any = len(lowercase_ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Any = None
if self.use_labels:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCAmelCase_ ( self : int , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase : List[Any] = TFResNetModel(config=lowercase_ )
UpperCAmelCase : int = model(lowercase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase_ ( self : Any , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[int] ) -> List[Any]:
UpperCAmelCase : List[Any] = self.num_labels
UpperCAmelCase : Union[str, Any] = TFResNetForImageClassification(lowercase_ )
UpperCAmelCase : Any = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = config_and_inputs
UpperCAmelCase : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class A_ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
UpperCAmelCase_ : Dict = (
{"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
if is_tf_available()
else {}
)
UpperCAmelCase_ : Tuple = False
UpperCAmelCase_ : Tuple = False
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : str = False
UpperCAmelCase_ : Optional[int] = False
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
UpperCAmelCase : Optional[int] = TFResNetModelTester(self )
UpperCAmelCase : Dict = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
def UpperCAmelCase_ ( self : str ) -> Any:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
pass
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any:
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[Any] = model_class(lowercase_ )
UpperCAmelCase : List[str] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : str = [*signature.parameters.keys()]
UpperCAmelCase : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
def check_hidden_states_output(lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] ):
UpperCAmelCase : Union[str, Any] = model_class(lowercase_ )
UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
UpperCAmelCase : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase : int = self.model_tester.num_stages
self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Tuple = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase : List[Any] = layer_type
UpperCAmelCase : int = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : List[Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Tuple:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : str = TFResNetModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def prepare_img( ):
UpperCAmelCase : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class A_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def default_image_processor ( self : Optional[Any] ) -> Any:
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self : Tuple ) -> Any:
UpperCAmelCase : Optional[int] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCAmelCase : Any = self.default_image_processor
UpperCAmelCase : List[Any] = prepare_img()
UpperCAmelCase : Dict = image_processor(images=lowercase_ , return_tensors='tf' )
# forward pass
UpperCAmelCase : List[Any] = model(**lowercase_ )
# verify the logits
UpperCAmelCase : Optional[Any] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
UpperCAmelCase : int = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowercase_ , atol=1E-4 ) )
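# Sanity check on the shapes asserted above: ResNet's total stride is 32, so the
# final feature map is image_size // 32 per side (a 224x224 input gives 7x7, the
# tester's 32x32 inputs give 1x1). Pure arithmetic, no model required:
def resnet_final_feature_size(image_size: int, total_stride: int = 32) -> int:
    # Spatial side length of the last hidden state.
    return image_size // total_stride

assert resnet_final_feature_size(224) == 7
assert resnet_final_feature_size(32) == 1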
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ :Tuple = logging.get_logger(__name__)
lowercase__ :List[Any] = {
"Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class lowercase ( PretrainedConfig ):
lowercase_ : Optional[int] ='''instructblip_vision_model'''
def __init__( self ,A__=1_4_0_8 ,A__=6_1_4_4 ,A__=3_9 ,A__=1_6 ,A__=2_2_4 ,A__=1_4 ,A__="gelu" ,A__=1E-6 ,A__=0.0 ,A__=1E-10 ,A__=True ,**A__ ,):
super().__init__(**A__)
lowercase = hidden_size
lowercase = intermediate_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = patch_size
lowercase = image_size
lowercase = initializer_range
lowercase = attention_dropout
lowercase = layer_norm_eps
lowercase = hidden_act
lowercase = qkv_bias
@classmethod
def A__ ( cls ,A__ ,**A__):
cls._set_token_in_kwargs(A__)
lowercase , lowercase = cls.get_config_dict(A__ ,**A__)
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''') == "instructblip":
lowercase = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls ,'''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
return cls.from_dict(A__ ,**A__)
class lowercase ( PretrainedConfig ):
lowercase_ : List[Any] ='''instructblip_qformer'''
def __init__( self ,A__=3_0_5_2_2 ,A__=7_6_8 ,A__=1_2 ,A__=1_2 ,A__=3_0_7_2 ,A__="gelu" ,A__=0.1 ,A__=0.1 ,A__=5_1_2 ,A__=0.02 ,A__=1E-12 ,A__=0 ,A__="absolute" ,A__=2 ,A__=1_4_0_8 ,**A__ ,):
super().__init__(pad_token_id=A__ ,**A__)
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = hidden_act
lowercase = intermediate_size
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = position_embedding_type
lowercase = cross_attention_frequency
lowercase = encoder_hidden_size
@classmethod
def A__ ( cls ,A__ ,**A__):
cls._set_token_in_kwargs(A__)
lowercase , lowercase = cls.get_config_dict(A__ ,**A__)
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''') == "instructblip":
lowercase = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls ,'''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
return cls.from_dict(A__ ,**A__)
class lowercase ( PretrainedConfig ):
lowercase_ : str ='''instructblip'''
lowercase_ : str =True
def __init__( self ,A__=None ,A__=None ,A__=None ,A__=3_2 ,**A__):
super().__init__(**A__)
if vision_config is None:
lowercase = {}
logger.info('''vision_config is None. initializing the InstructBlipVisionConfig with default values.''')
if qformer_config is None:
lowercase = {}
logger.info('''qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.''')
if text_config is None:
lowercase = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''')
lowercase = InstructBlipVisionConfig(**A__)
lowercase = InstructBlipQFormerConfig(**A__)
lowercase = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
lowercase = CONFIG_MAPPING[text_model_type](**A__)
lowercase = self.text_config.tie_word_embeddings
lowercase = self.text_config.is_encoder_decoder
lowercase = num_query_tokens
lowercase = self.vision_config.hidden_size
lowercase = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
lowercase = 1.0
lowercase = 0.02
@classmethod
def A__ ( cls ,A__ ,A__ ,A__ ,**A__ ,):
return cls(
vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**A__ ,)
def A__ ( self):
lowercase = copy.deepcopy(self.__dict__)
lowercase = self.vision_config.to_dict()
lowercase = self.qformer_config.to_dict()
lowercase = self.text_config.to_dict()
lowercase = self.__class__.model_type
return output
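# A minimal sketch of composing the three sub-configs above into a full
# InstructBlipConfig (assumes a transformers version that ships InstructBlip;
# the hyperparameter values are illustrative):
from transformers import (
    InstructBlipConfig,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    OPTConfig,
)

vision = InstructBlipVisionConfig(hidden_size=1408)
qformer = InstructBlipQFormerConfig(hidden_size=768)
text = OPTConfig()
config = InstructBlipConfig(
    vision_config=vision.to_dict(),
    qformer_config=qformer.to_dict(),
    text_config=text.to_dict(),
    num_query_tokens=32,
)
assert config.num_query_tokens == 32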
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ :Any = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Tuple = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Union[str, Any] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Optional[int] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Union[str, Any] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowercase__ :List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
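# The _LazyModule above keeps `import transformers.models.distilbert` cheap:
# heavy backends (torch, tf, flax) are imported only when one of their symbols
# is first accessed. A minimal sketch of the same deferred-import idea in plain
# Python (class and attribute names are illustrative, not the real _LazyModule):
import importlib


class LazyModule:
    def __init__(self, package, import_structure):
        self._package = package
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # Only reached for attributes not yet resolved; import on first touch.
        module = importlib.import_module(f"{self._package}.{self._attr_to_module[attr]}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value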
def pigeonhole_sort(a) -> None:
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(n) for n in a))
if __name__ == "__main__":
main()
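# Worked trace for the demo input: min is 2 and max is 8, so size is 7 and the
# holes for values 2..8 fill up as [1, 1, 1, 0, 1, 1, 2] (the duplicate 8 just
# increments its hole). Reading the holes back out yields the sorted order:
nums = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(nums)
assert nums == [2, 3, 4, 6, 7, 8, 8]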
'''simple docstring'''
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path: str, albert_config_file: str, pytorch_dump_path: str ):
    '''simple docstring'''
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--albert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained ALBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class lowercase ( BertTokenizationTest ):
"""simple docstring"""
_a = DistilBertTokenizer
_a = DistilBertTokenizerFast
_a = True
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
UpperCamelCase__ :Optional[int] = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCamelCase_ )
UpperCamelCase__ :Any = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
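# For reference, the layouts checked above (a property of BERT-style tokenizers,
# which DistilBERT inherits):
#   single sequence: [CLS] tokens [SEP]
#   pair:            [CLS] tokens_a [SEP] tokens_b [SEP]
# A quick illustration with BERT's conventional special-token ids (101 = [CLS],
# 102 = [SEP]); the word-piece ids are hypothetical:
cls_id, sep_id = 101, 102
text_ids, pair_ids = [7592], [2088]
assert [cls_id] + text_ids + [sep_id] == [101, 7592, 102]
assert [cls_id] + text_ids + [sep_id] + pair_ids + [sep_id] == [101, 7592, 102, 2088, 102]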
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__snake_case = get_logger()
__snake_case = None
class lowercase ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
"""simple docstring"""
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , **UpperCamelCase_ ):
'''simple docstring'''
super().__init__(features=UpperCamelCase_ )
import jax
from jaxlib.xla_client import Device
if isinstance(UpperCamelCase_ , Device ):
raise ValueError(
F'''Expected {device} to be a `str` not {type(UpperCamelCase_ )}, as `jaxlib.xla_extension.Device` '''
'''is not serializable neither with `pickle` nor with `dill`. Instead you can surround '''
'''the device with `str()` to get its string identifier that will be internally mapped '''
'''to the actual `jaxlib.xla_extension.Device`.''' )
UpperCamelCase__ :Tuple = device if isinstance(UpperCamelCase_ , str ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
UpperCamelCase__ :Optional[Any] = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F'''Device with string identifier {self.device} not listed among the available '''
F'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
F'''device: {str(jax.devices()[0] )}.''' )
UpperCamelCase__ :Optional[int] = str(jax.devices()[0] )
UpperCamelCase__ :Tuple = jnp_array_kwargs
@staticmethod
def lowerCAmelCase__ ( ):
'''simple docstring'''
import jax
return {str(UpperCamelCase_ ): device for device in jax.devices()}
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(UpperCamelCase_ , list ) and column:
if all(
isinstance(UpperCamelCase_ , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(UpperCamelCase_ , axis=0 )
return column
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(UpperCamelCase_ , (str, bytes, type(None)) ):
return value
elif isinstance(UpperCamelCase_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
UpperCamelCase__ :Optional[int] = {}
if isinstance(UpperCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_x64:
UpperCamelCase__ :List[str] = {'''dtype''': jnp.int64}
else:
UpperCamelCase__ :Union[str, Any] = {'''dtype''': jnp.int32}
elif isinstance(UpperCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
UpperCamelCase__ :Optional[Any] = {'''dtype''': jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(UpperCamelCase_ , PIL.Image.Image ):
UpperCamelCase__ :str = np.asarray(UpperCamelCase_ )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
UpperCamelCase__ :Dict = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(UpperCamelCase_ , **{**default_dtype, **self.jnp_array_kwargs} )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(UpperCamelCase_ , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(UpperCamelCase_ , '''__array__''' ) and not isinstance(UpperCamelCase_ , jax.Array ):
UpperCamelCase__ :int = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(UpperCamelCase_ , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(UpperCamelCase_ ) for substruct in data_struct] )
elif isinstance(UpperCamelCase_ , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(UpperCamelCase_ ) for substruct in data_struct] )
return self._tensorize(UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
return map_nested(self._recursive_tensorize , UpperCamelCase_ , map_list=False )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.numpy_arrow_extractor().extract_row(UpperCamelCase_ )
UpperCamelCase__ :List[Any] = self.python_features_decoder.decode_row(UpperCamelCase_ )
return self.recursive_tensorize(UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.numpy_arrow_extractor().extract_column(UpperCamelCase_ )
UpperCamelCase__ :Tuple = self.python_features_decoder.decode_column(UpperCamelCase_ , pa_table.column_names[0] )
UpperCamelCase__ :Dict = self.recursive_tensorize(UpperCamelCase_ )
UpperCamelCase__ :str = self._consolidate(UpperCamelCase_ )
return column
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :int = self.numpy_arrow_extractor().extract_batch(UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = self.python_features_decoder.decode_batch(UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.recursive_tensorize(UpperCamelCase_ )
for column_name in batch:
UpperCamelCase__ :Optional[int] = self._consolidate(batch[column_name] )
return batch
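# In practice this formatter is selected through the datasets API rather than
# instantiated directly. A minimal usage sketch (assumes `datasets` and `jax`
# are installed; the column data is illustrative):
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]})
ds = ds.with_format("jax")
row = ds[0]  # values come back as jax.numpy arrays via the formatter above
print(type(row["x"]), row["x"].shape)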
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")
    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)
    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
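# A minimal usage sketch for the helpers above (the values are illustrative):
dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
value_array = np.array([[0.9, 1.1]])
# For each query row, return the nearest dataset vector and its distance.
print(similarity_search(dataset, value_array))  # [[[1.0, 1.0], ~0.141]]
print(cosine_similarity(np.array([1.0, 2.0]), np.array([6.0, 32.0])))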
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
@property
def dummy_uncond_unet( self ) ->List[str]:
torch.manual_seed(0 )
lowerCAmelCase = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
lowerCAmelCase = self.dummy_uncond_unet
lowerCAmelCase = KarrasVeScheduler()
lowerCAmelCase = KarrasVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = pipe(num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = pipe(num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' , return_dict=__SCREAMING_SNAKE_CASE )[0]
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
lowerCAmelCase = '''google/ncsnpp-celebahq-256'''
lowerCAmelCase = UNet2DModel.from_pretrained(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = KarrasVeScheduler()
lowerCAmelCase = KarrasVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = pipe(num_inference_steps=20 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowerCAmelCase = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
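# Outside the test harness, the same pipeline runs in a few lines (a sketch
# assuming a diffusers version that still ships KarrasVePipeline; the model id
# is taken from the slow test above, and a GPU is strongly advised):
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
import torch

unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
image = pipe(num_inference_steps=20, generator=torch.manual_seed(0)).images[0]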
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
def __a ( self :int) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self :Optional[int]) -> Union[str, Any]:
UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''')
UpperCAmelCase_ = sd_pipe.to(_lowercase)
sd_pipe.set_progress_bar_config(disable=_lowercase)
sd_pipe.set_scheduler('''sample_euler''')
UpperCAmelCase_ = '''A painting of a squirrel eating a burger'''
UpperCAmelCase_ = torch.manual_seed(0)
UpperCAmelCase_ = sd_pipe([prompt] , generator=_lowercase , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''')
UpperCAmelCase_ = output.images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def __a ( self :Union[str, Any]) -> Union[str, Any]:
UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''')
UpperCAmelCase_ = sd_pipe.to(_lowercase)
sd_pipe.set_progress_bar_config(disable=_lowercase)
sd_pipe.set_scheduler('''sample_euler''')
UpperCAmelCase_ = '''A painting of a squirrel eating a burger'''
UpperCAmelCase_ = torch.manual_seed(0)
UpperCAmelCase_ = sd_pipe([prompt] , generator=_lowercase , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''')
UpperCAmelCase_ = output.images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-1
def __a ( self :List[str]) -> str:
UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''')
UpperCAmelCase_ = sd_pipe.to(_lowercase)
sd_pipe.set_progress_bar_config(disable=_lowercase)
sd_pipe.set_scheduler('''sample_dpmpp_2m''')
UpperCAmelCase_ = '''A painting of a squirrel eating a burger'''
UpperCAmelCase_ = torch.manual_seed(0)
UpperCAmelCase_ = sd_pipe(
[prompt] , generator=_lowercase , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=_lowercase , )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array(
[0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = "▁"
UpperCamelCase_ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class a_ ( TokenizerTesterMixin , unittest.TestCase ):
UpperCamelCase__ : str =BigBirdTokenizer
UpperCamelCase__ : Tuple =BigBirdTokenizerFast
UpperCamelCase__ : Union[str, Any] =True
UpperCamelCase__ : List[str] =True
def __a ( self :Any) -> List[str]:
super().setUp()
UpperCAmelCase_ = self.tokenizer_class(_lowercase , keep_accents=_lowercase)
tokenizer.save_pretrained(self.tmpdirname)
def __a ( self :Optional[int]) -> str:
UpperCAmelCase_ = '''<s>'''
UpperCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase) , _lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase) , _lowercase)
def __a ( self :str) -> str:
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<unk>''')
self.assertEqual(vocab_keys[1] , '''<s>''')
self.assertEqual(vocab_keys[-1] , '''[MASK]''')
self.assertEqual(len(_lowercase) , 1004)
def __a ( self :List[str]) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 1000)
def __a ( self :Tuple) -> int:
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase_ = tokenizer.tokenize(_lowercase)
UpperCAmelCase_ = rust_tokenizer.tokenize(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
UpperCAmelCase_ = tokenizer.encode(_lowercase , add_special_tokens=_lowercase)
UpperCAmelCase_ = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase)
self.assertListEqual(_lowercase , _lowercase)
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = tokenizer.encode(_lowercase)
UpperCAmelCase_ = rust_tokenizer.encode(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
def __a ( self :Optional[Any]) -> List[str]:
UpperCAmelCase_ = BigBirdTokenizer(_lowercase , keep_accents=_lowercase)
UpperCAmelCase_ = tokenizer.tokenize('''This is a test''')
self.assertListEqual(_lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase) , [285, 46, 10, 170, 382] , )
UpperCAmelCase_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_lowercase)
self.assertListEqual(
_lowercase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_lowercase)
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def big_tokenizer( self :Any) -> List[Any]:
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
@slow
def __a ( self :int) -> List[Any]:
UpperCAmelCase_ = '''Hello World!'''
UpperCAmelCase_ = [65, 18536, 2260, 101, 66]
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase))
@slow
def __a ( self :int) -> Any:
UpperCAmelCase_ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
UpperCAmelCase_ = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase))
@require_torch
@slow
def __a ( self :Dict) -> Union[str, Any]:
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
UpperCAmelCase_ = list(self.big_tokenizer.get_vocab().keys())[:10]
UpperCAmelCase_ = ''' '''.join(_lowercase)
UpperCAmelCase_ = self.big_tokenizer.encode_plus(_lowercase , return_tensors='''pt''' , return_token_type_ids=_lowercase)
UpperCAmelCase_ = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_lowercase)
UpperCAmelCase_ = BigBirdConfig(attention_type='''original_full''')
UpperCAmelCase_ = BigBirdModel(_lowercase)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_lowercase)
model(**_lowercase)
@slow
def __a ( self :Optional[int]) -> Any:
UpperCAmelCase_ = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
UpperCAmelCase_ = tokenizer.decode(tokenizer('''Paris is the [MASK].''').input_ids)
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''')
@slow
def __a ( self :Dict) -> List[str]:
# fmt: off
UpperCAmelCase_ = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowercase :
"""simple docstring"""
@staticmethod
def _SCREAMING_SNAKE_CASE ( *lowerCAmelCase__ : Optional[int] , **lowerCAmelCase__ : int):
pass
@is_pipeline_test
@require_vision
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: int = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
SCREAMING_SNAKE_CASE_: Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
SCREAMING_SNAKE_CASE_: List[str] = image_classifier(lowerCAmelCase__ , candidate_labels=["a", "b", "c"])
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(lowerCAmelCase__) , [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
] , )
SCREAMING_SNAKE_CASE_: Tuple = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__) , [
[
{"score": 0.333, "label": ANY(lowerCAmelCase__)},
{"score": 0.333, "label": ANY(lowerCAmelCase__)},
{"score": 0.333, "label": ANY(lowerCAmelCase__)},
],
[
{"score": 0.333, "label": ANY(lowerCAmelCase__)},
{"score": 0.333, "label": ANY(lowerCAmelCase__)},
{"score": 0.333, "label": ANY(lowerCAmelCase__)},
],
[
{"score": 0.333, "label": ANY(lowerCAmelCase__)},
{"score": 0.333, "label": ANY(lowerCAmelCase__)},
{"score": 0.333, "label": ANY(lowerCAmelCase__)},
],
[
{"score": 0.333, "label": ANY(lowerCAmelCase__)},
{"score": 0.333, "label": ANY(lowerCAmelCase__)},
{"score": 0.333, "label": ANY(lowerCAmelCase__)},
],
[
{"score": 0.333, "label": ANY(lowerCAmelCase__)},
{"score": 0.333, "label": ANY(lowerCAmelCase__)},
{"score": 0.333, "label": ANY(lowerCAmelCase__)},
],
] , )
@require_tf
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf")
SCREAMING_SNAKE_CASE_: Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
SCREAMING_SNAKE_CASE_: Tuple = image_classifier(lowerCAmelCase__ , candidate_labels=["a", "b", "c"])
self.assertEqual(
nested_simplify(lowerCAmelCase__) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , )
SCREAMING_SNAKE_CASE_: Optional[Any] = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__) , [
[
{"score": 0.333, "label": ANY(lowerCAmelCase__)},
{"score": 0.333, "label": ANY(lowerCAmelCase__)},
{"score": 0.333, "label": ANY(lowerCAmelCase__)},
],
[
{"score": 0.333, "label": ANY(lowerCAmelCase__)},
{"score": 0.333, "label": ANY(lowerCAmelCase__)},
{"score": 0.333, "label": ANY(lowerCAmelCase__)},
],
[
{"score": 0.333, "label": ANY(lowerCAmelCase__)},
{"score": 0.333, "label": ANY(lowerCAmelCase__)},
{"score": 0.333, "label": ANY(lowerCAmelCase__)},
],
[
{"score": 0.333, "label": ANY(lowerCAmelCase__)},
{"score": 0.333, "label": ANY(lowerCAmelCase__)},
{"score": 0.333, "label": ANY(lowerCAmelCase__)},
],
[
{"score": 0.333, "label": ANY(lowerCAmelCase__)},
{"score": 0.333, "label": ANY(lowerCAmelCase__)},
{"score": 0.333, "label": ANY(lowerCAmelCase__)},
],
] , )
@slow
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: str = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE_: str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
SCREAMING_SNAKE_CASE_: int = image_classifier(lowerCAmelCase__ , candidate_labels=["cat", "plane", "remote"])
self.assertEqual(
nested_simplify(lowerCAmelCase__) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
SCREAMING_SNAKE_CASE_: Dict = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
@slow
@require_tf
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Optional[Any] = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf")
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE_: Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
SCREAMING_SNAKE_CASE_: str = image_classifier(lowerCAmelCase__ , candidate_labels=["cat", "plane", "remote"])
self.assertEqual(
nested_simplify(lowerCAmelCase__) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
SCREAMING_SNAKE_CASE_: str = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2)
self.assertEqual(
nested_simplify(lowerCAmelCase__) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
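# The same checks, distilled into a minimal standalone usage sketch (model id
# and candidate labels taken from the slow test above):
from transformers import pipeline
from PIL import Image

classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
preds = classifier(image, candidate_labels=["cat", "plane", "remote"])
print(preds)  # list of {"score": ..., "label": ...}, highest score first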
class Node:
    """simple docstring"""
    def __init__( self , name , val ):
        self.name = name
        self.val = val
    def __str__( self ):
        return F"{self.__class__.__name__}({self.name}, {self.val})"
    def __lt__( self , other ):
        return self.val < other.val
class MinHeap:
    """simple docstring"""
    def __init__( self , array ):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)
    def __getitem__( self , key ):
        return self.get_value(key)
    def get_parent_idx( self , idx ):
        return (idx - 1) // 2
    def get_left_child_idx( self , idx ):
        return idx * 2 + 1
    def get_right_child_idx( self , idx ):
        return idx * 2 + 2
    def get_value( self , key ):
        return self.heap_dict[key]
    def build_heap( self , array ):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array
    def sift_down( self , idx , array ):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break
    def sift_up( self , idx ):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)
    def peek( self ):
        return self.heap[0]
    def remove( self ):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x
    def insert( self , node ):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)
    def is_empty( self ):
        return len(self.heap) == 0
    def decrease_key( self , node , new_value ):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# my_min_heap.insert(a)
# my_min_heap.insert(b)
# my_min_heap.insert(x)
# my_min_heap.insert(r)
# my_min_heap.insert(e)
# Before
print("""Min Heap - before decrease key""")
for i in my_min_heap.heap:
print(i)
print("""Min Heap - After decrease key of node [B -> -17]""")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
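# Why decrease_key only needs sift_up: lowering a node's value can only violate
# the heap property against its ancestors, never its children, so one upward
# pass restores it. A quick check with the objects above:
assert my_min_heap.peek() is b   # B now holds -17, the smallest value
assert my_min_heap["B"] == -17   # heap_dict lookup via __getitem__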
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
assert isinstance(_UpperCamelCase , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowerCAmelCase = JsonDatasetReader(_UpperCamelCase , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase ).read()
_check_json_dataset(_UpperCamelCase , _UpperCamelCase )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # Features are given in a different order than the columns appear in the file.
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__lowerCAmelCase = JsonDatasetReader(_UpperCamelCase , cache_dir=_UpperCamelCase , split=_UpperCamelCase ).read()
_check_json_dataset(_UpperCamelCase , _UpperCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
if issubclass(_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase = jsonl_path
elif issubclass(_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase = [jsonl_path]
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__lowerCAmelCase = JsonDatasetReader(_UpperCamelCase , cache_dir=_UpperCamelCase ).read()
_check_json_dataset(_UpperCamelCase , _UpperCamelCase )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=("train",) ):
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase )
for split in splits:
__lowerCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowerCAmelCase = JsonDatasetReader({"train": jsonl_path} , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase ).read()
_check_json_datasetdict(_UpperCamelCase , _UpperCamelCase )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
if split:
__lowerCAmelCase = {split: jsonl_path}
else:
__lowerCAmelCase = "train"
__lowerCAmelCase = {"train": jsonl_path, "test": jsonl_path}
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__lowerCAmelCase = JsonDatasetReader(_UpperCamelCase , cache_dir=_UpperCamelCase ).read()
_check_json_datasetdict(_UpperCamelCase , _UpperCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def snake_case ( self , __a , __a , __a ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__lowercase , __lowercase , lines=__lowercase , num_proc=2 ).write()
buffer.seek(0 )
__lowerCAmelCase = load_json_function(__lowercase )
assert isinstance(__lowercase , __lowercase )
assert isinstance(exported_content[0] , __lowercase )
assert len(__lowercase ) == 10
    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)
@pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
def snake_case ( self , __a , __a , __a , __a , __a ):
__lowerCAmelCase = tmp_path_factory.mktemp("data" ) / f"test.json.{extension}"
__lowerCAmelCase = str(shared_datadir / f"test_file.json.{extension}" )
JsonDatasetWriter(__lowercase , __lowercase , compression=__lowercase ).write()
with fsspec.open(__lowercase , "rb" , compression="infer" ) as f:
__lowerCAmelCase = f.read()
with fsspec.open(__lowercase , "rb" , compression="infer" ) as f:
__lowerCAmelCase = f.read()
assert exported_content == original_content
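# A minimal round-trip sketch (not part of the original test suite, added as an
# assumed illustration): write a small in-memory Dataset to JSON Lines and read
# it back. The column names and values below are illustrative only, not the
# pytest fixtures used above.
if __name__ == "__main__":
    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [0.1, 0.2]})
    with io.BytesIO() as buf:
        JsonDatasetWriter(ds, buf, lines=True).write()
        buf.seek(0)
        print([json.loads(line) for line in buf])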
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
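# A quick illustration (assumed usage, not part of the original file): the
# default config splits a 32-frame, 224x224 clip into tubelet patches of size
# 2x16x16, i.e. (32 / 2) * (224 / 16) ** 2 = 3136 tokens per video.
if __name__ == "__main__":
    config = VivitConfig()
    t, h, w = config.tubelet_size
    num_patches = (config.num_frames // t) * (config.image_size // h) * (config.image_size // w)
    print(num_patches)  # 3136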
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
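# Illustrative only (assumed usage, not in the original file): the ONNX config
# exposes different dynamic axes depending on the export task; the sketch below
# prints the mapping for the default and multiple-choice cases.
if __name__ == "__main__":
    for task in ("default", "multiple-choice"):
        onnx_config = CamembertOnnxConfig(CamembertConfig(), task=task)
        print(task, dict(onnx_config.inputs))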
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Remove the Pegasus newline token and put each sentence on its own line."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item: one point for every gene matching the target at the same position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and recombine the halves."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """With probability MUTATION_PROBABILITY, replace one random gene of the child."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
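# A small deterministic demo of the helpers above (illustrative, not part of
# the original module): with a fixed seed, crossover, mutate, and evaluate are
# reproducible, which makes the recombination step easy to inspect.
def _demo_helpers() -> None:
    random.seed(0)
    child_1, child_2 = crossover("AAAA", "BBBB")
    print(child_1, child_2)             # complementary halves of both parents
    print(mutate(child_1, list("AB")))  # possibly one gene flipped
    print(evaluate(child_1, "ABBB"))    # (child_1, number of matches as float)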
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve random strings until one matches target; return (generation, total_population, best)."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #         max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
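# Assumed invocation sketch (not part of the original conftest): with this
# conftest on the path, test report files can be generated via the option the
# shared helper registers above, e.g.
#
#   pytest tests/ --make-reports=run_tests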
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels
    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"

        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]

        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
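# A hedged standalone sketch of the generation loop the slow test exercises
# (assumed usage, not part of the test file; downloads a 2.7B checkpoint):
#
#   from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseTokenizer
#   tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   model = GPTNeoXJapaneseForCausalLM.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   ids = tokenizer("データサイエンティストとは、", return_tensors="pt").input_ids
#   print(tokenizer.batch_decode(model.generate(ids, max_length=50), skip_special_tokens=True))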
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility: dump a decoded and a tokenized view of one batch."""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")

        self.already_saved_batch = True
        return readable_batch
    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)
    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)
    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)
    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id
    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)
        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}
    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)
def UpperCAmelCase__ ( self , snake_case__ , snake_case__="val" ) -> Dict:
'''simple docstring'''
self.step_count += 1
UpperCAmelCase : Dict ={k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
UpperCAmelCase : Any =losses['''loss''']
UpperCAmelCase : Union[str, Any] ={
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
}
UpperCAmelCase : Optional[Any] =(
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
UpperCAmelCase : torch.FloatTensor =torch.tensor(snake_case__ ).type_as(snake_case__ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(snake_case__ )
UpperCAmelCase : str ={f'''{prefix}_avg_{k}''': x for k, x in losses.items()}
UpperCAmelCase : Optional[int] =self.step_count
self.metrics[prefix].append(snake_case__ ) # callback writes this to self.metrics_save_path
UpperCAmelCase : Union[str, Any] =flatten_list([x['''preds'''] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f'''{prefix}_loss''': loss,
f'''{prefix}_{self.val_metric}''': metric_tensor,
}
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)
    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, preds))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics
    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")
    def get_dataset(self, type_path) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset
    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )
    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
__snake_case = pl.Trainer.add_argparse_args(parser)
__snake_case = SummarizationModule.add_model_specific_args(parser, os.getcwd())
__snake_case = parser.parse_args()
main(args)
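# Assumed CLI sketch (not in the original file): a typical fine-tuning run.
# The data and model paths are placeholders; generic options such as
# --model_name_or_path come from lightning_base.add_generic_args above.
#
#   python finetune.py \
#     --model_name_or_path sshleifer/distilbart-cnn-12-6 \
#     --data_dir ./cnn_dm --output_dir ./outputs \
#     --task summarization --do_predict \
#     --max_source_length 1024 --max_target_length 56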
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
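# Illustrative usage sketch (assumed, not in the original file): tokenizing a
# string yields one token per character; the vocab file path is a placeholder.
#
#   tokenizer = MgpstrTokenizer(vocab_file="vocab.json")
#   print(tokenizer._tokenize("abc"))  # ['a', 'b', 'c']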
from manim import *
class __A ( __A ):
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Union[str, Any] = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)
        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)
        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(UP, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)
        cpu_targs = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(cpu_targs[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            cpu_targs.append(cpu_target)
        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
        checkpoint.move_to([3, 0.5, 0])
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)
        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint", font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        step_2 = MarkupText(
            f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.', font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2), Write(blue_text))
        self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))
        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            first_animations.append(GrowFromCenter(target, run_time=1))
            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
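        # Usage sketch (not part of the original file; the module path and class
        # name are assumptions): if this `construct` body lives in a `Scene`
        # subclass called `Stage2` in `stage.py`, it can be rendered with:
        #
        #   manim -pql stage.py Stage2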
| 138
|
"""A naive greedy knapsack: take items by value-to-weight ratio until full."""


class Things:
    def __init__(self, name, value, weight) -> None:
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight) -> list:
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func) -> tuple:
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy() -> None:
    """Placeholder for the module's doctests (elided in this copy)."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
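# A minimal usage sketch (the menu values below are illustrative, not from the
# original module): build a menu and greedily pack a knapsack of capacity 25,
# ranking items by value-to-weight ratio.
def greedy_example() -> None:
    names = ["Burger", "Pizza", "Cola", "Rice"]
    values = [80.0, 100.0, 60.0, 40.0]
    weights = [40.0, 10.0, 20.0, 24.0]
    menu = build_menu(names, values, weights)
    taken, total_value = greedy(menu, 25.0, Things.value_weight)
    print(taken, total_value)  # only Pizza fits after sorting; total value 100.0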
| 344
| 0
|
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union

import numpy as np
import torch

from .utils import deprecate, is_transformers_available


if is_transformers_available():
    import transformers


def set_seed(seed: int):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available


class EMAModel:
    """Exponential Moving Average of model weights."""

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`", "1.0.0", deprecation_message, standard_warn=False,
            )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None
        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`
        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)
        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)
        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")
        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")
        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)
        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        step = max(0, optimization_step - self.update_after_step - 1)
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)
        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value

    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`", "1.0.0", deprecation_message, standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)
        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)
        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        state_dict = copy.deepcopy(state_dict)
        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")
        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")
        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")
        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")
        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")
        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")
        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")
        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
| 193
|
import unittest
from dataclasses import dataclass

import pytest

from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict


@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
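# Illustrative expectation (inferred from the assertions above, not from the
# original file): `_convert_nargs_to_dict` turns the nargs-style list into
#   {"model_name_or_path": "bert", "do_train": False, "epochs": 3,
#    "learning_rate": 5e-5, "max_steps": 50.5}
# inferring bool/int/float types from the values; the second list fails
# because bare flags like `--do_predict` cannot be paired with values.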
| 193
| 1
|
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
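# A minimal usage sketch (assuming the transformers `pipeline` factory; the
# checkpoint name is illustrative):
#
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   features = extractor("Hello world")  # nested list, shape [1, seq_len, hidden_size]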
| 2
|
import sys


def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
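# Worked example (illustrative): for dims [30, 35, 15, 5, 10, 20, 25] the
# matrices are A1 (30x35) ... A6 (20x25). The recurrence fills
#   matrix[a][b] = min over c of matrix[a][c] + matrix[c+1][b] + d[a-1]*d[c]*d[b];
# e.g. multiplying A1 by A2 directly costs 0 + 0 + 30*35*15 = 15750 scalar
# multiplications, while the DP finds the cheapest full parenthesization
# (15125 for this classic instance).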
| 259
| 0
|
import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 201
|
import math


def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
| 201
| 1
|
"""simple docstring"""
import math
import sys
import cva
import numpy as np
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : np.ndarray ,_lowerCamelCase : float ) -> np.ndarray:
# For applying gaussian function for each element in matrix.
_lowerCAmelCase : Tuple = math.sqrt(_lowerCamelCase )
_lowerCAmelCase : Dict = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : np.ndarray ,_lowerCamelCase : int ,_lowerCamelCase : int ,_lowerCamelCase : int ) -> np.ndarray:
_lowerCAmelCase : List[Any] = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : float ) -> np.ndarray:
# Creates a gaussian kernel of given dimension.
_lowerCAmelCase : List[str] = np.zeros((kernel_size, kernel_size) )
for i in range(0 ,_lowerCamelCase ):
for j in range(0 ,_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(_lowerCamelCase ,_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : np.ndarray ,_lowerCamelCase : float ,_lowerCamelCase : float ,_lowerCamelCase : int ,) -> np.ndarray:
_lowerCAmelCase : Union[str, Any] = np.zeros(img.shape )
_lowerCAmelCase : Dict = get_gauss_kernel(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : Tuple = img.shape
for i in range(kernel_size // 2 ,size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 ,size_y - kernel_size // 2 ):
_lowerCAmelCase : Union[str, Any] = get_slice(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : List[Any] = img_s - img_s[kernel_size // 2, kernel_size // 2]
_lowerCAmelCase : List[Any] = vec_gaussian(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = np.multiply(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : int = np.multiply(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : str = np.sum(_lowerCamelCase ) / np.sum(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = val
return imga
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list ) -> tuple:
_lowerCAmelCase : List[Any] = args[1] if args[1:] else """../image_data/lena.jpg"""
_lowerCAmelCase : Optional[Any] = float(args[2] ) if args[2:] else 1.0
_lowerCAmelCase : Dict = float(args[3] ) if args[3:] else 1.0
if args[4:]:
_lowerCAmelCase : List[str] = int(args[4] )
_lowerCAmelCase : List[str] = kernel_size + abs(kernel_size % 2 - 1 )
else:
_lowerCAmelCase : Tuple = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
_a , _a , _a , _a : Optional[Any] = parse_args(sys.argv)
_a : Optional[Any] = cva.imread(filename, 0)
cva.imshow('input image', img)
_a : List[str] = img / 255
_a : Union[str, Any] = out.astype('float32')
_a : Dict = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
_a : Optional[int] = out * 255
_a : Union[str, Any] = np.uinta(out)
cva.imshow('output image', out)
cva.waitKey(0)
cva.destroyAllWindows()
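# Usage sketch (paths and values illustrative):
#
#   python bilateral_filter.py ../image_data/lena.jpg 1.0 1.0 5
#
# reads the image in grayscale, filters it with spatial variance 1.0,
# intensity variance 1.0 and a 5x5 kernel (kernel sizes are forced to be
# odd), then shows the input and output images side by side.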
| 44
|
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 44
| 1
|
from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
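# A minimal sketch (not from the original module) of constructing the config
# and reading a derived field:
#
#   config = EfficientNetConfig(width_coefficient=1.0, depth_coefficient=1.0)
#   print(config.num_hidden_layers)  # sum(num_block_repeats) * 4 == 64 by default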
| 204
|
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image


if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    """Convert a torch image tensor in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
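# A minimal usage sketch (illustrative): convert a random diffusion-style
# tensor in [-1, 1] with layout (batch, channels, height, width) to PIL.
#
#   import torch
#   sample = torch.rand(1, 3, 64, 64) * 2 - 1
#   pil_images = pt_to_pil(sample)  # list with one 64x64 RGB PIL.Image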
| 204
| 1
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"""
),
"""squeezebert/squeezebert-mnli""": """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt""",
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""squeezebert/squeezebert-uncased""": 512,
"""squeezebert/squeezebert-mnli""": 512,
"""squeezebert/squeezebert-mnli-headless""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""squeezebert/squeezebert-uncased""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli-headless""": {"""do_lower_case""": True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
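# A minimal usage sketch (checkpoint name taken from the map above):
#
#   tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#   enc = tokenizer("first sentence", "second sentence")
#   # enc["token_type_ids"] marks the first segment with 0s, the second with 1s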
| 102
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
snake_case_ = logging.get_logger(__name__)
class BlipImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, do_convert_rgb: bool = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
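# A minimal usage sketch (file name illustrative):
#
#   from PIL import Image
#   processor = BlipImageProcessor()
#   pixel_values = processor(Image.open("cat.png"), return_tensors="np")["pixel_values"]
#   # -> float array of shape (1, 3, 384, 384): converted to RGB, resized,
#   #    rescaled to [0, 1] and normalized with the CLIP mean/std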
| 78
| 0
|
import os
from datetime import datetime as dt
from github import Github
lowerCamelCase = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
| 360
|
from collections.abc import Iterable
from typing import Any


class Node:
    def __init__(self, value: int | None = None):
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)


class BinarySearchTree:
    def __init__(self, root: Node | None = None):
        self.root = root

    def __str__(self) -> str:
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value: int) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None) -> None:
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]


def postorder(curr_node):
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
| 211
| 0
|
from math import isqrt


def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
| 193
|
import inspect
import unittest

from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 193
| 1
|
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
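# A minimal usage sketch (file name illustrative): read a plain-text file into
# a dataset with one "text" column, one example per line.
#
#   ds = TextDatasetReader("data.txt").read()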
| 314
|
"""simple docstring"""
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    def __init__(self, initial_learning_rate: float, decay_schedule_fn: Callable, warmup_steps: int, power: float = 1.0, name: str = None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float, lambda: warmup_learning_rate, lambda: self.decay_schedule_fn(step - self.warmup_steps), name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }


def create_optimizer(init_lr: float, num_train_steps: int, num_warmup_steps: int, min_lr_ratio: float = 0.0, adam_beta1: float = 0.9, adam_beta2: float = 0.999, adam_epsilon: float = 1e-8, adam_clipnorm: Optional[float] = None, adam_global_clipnorm: Optional[float] = None, weight_decay_rate: float = 0.0, power: float = 1.0, include_in_weight_decay: Optional[List[str]] = None):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule


class AdamWeightDecay(Adam):
    def __init__(self, learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001, beta_1: float = 0.9, beta_2: float = 0.999, epsilon: float = 1e-7, amsgrad: bool = False, weight_decay_rate: float = 0.0, include_in_weight_decay: Optional[List[str]] = None, exclude_from_weight_decay: Optional[List[str]] = None, name: str = "AdamWeightDecay", **kwargs):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"], use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator(object):
    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
| 314
| 1
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
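# A standalone sketch of the slicing performed above, with a toy hidden size
# (illustrative only): timm stores attention as one fused (3 * hidden, hidden)
# qkv matrix, which is cut into equal query / key / value blocks, in that order.
#
#     import torch
#     hidden = 4
#     qkv = torch.randn(3 * hidden, hidden)
#     q, k, v = qkv[:hidden], qkv[hidden : 2 * hidden], qkv[-hidden:]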
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
UpperCamelCase__ : Dict = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
elif "b16" in checkpoint_url:
UpperCamelCase__ : str = torch.tensor([[14.2889, -18.9045, 11.7281]] )
elif "l16" in checkpoint_url:
UpperCamelCase__ : Dict = torch.tensor([[41.5028, -22.8681, 45.6475]] )
elif "b4" in checkpoint_url:
UpperCamelCase__ : List[Any] = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
else:
UpperCamelCase__ : Union[str, Any] = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , __UpperCAmelCase , atol=1e-4 )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(__UpperCAmelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 201
|
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the node's prefix and a word"""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        """Insert many words into the tree"""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a word into the tree"""
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        """Returns True if the word is in the tree"""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        """Deletes a word from the tree if it exists"""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True

    def print_tree(self, height: int = 0) -> None:
        """Print the tree"""
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)
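# A hand-checkable sketch of what `match` returns for two overlapping words:
# the common prefix, the unmatched tail of the node's prefix, and the unmatched
# tail of the query word.
#
#     node = RadixNode(prefix="banana")
#     node.match("bandana")  # -> ("ban", "ana", "dana")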
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
| 201
| 1
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image: Union[str, "Image.Image"], candidate_labels: Union[str, List[str]] = None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")
        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
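# A minimal usage sketch; the `pipeline` factory and the OWL-ViT checkpoint name
# are assumptions for illustration, not something this file defines:
#
#     from transformers import pipeline
#     detector = pipeline(task="zero-shot-object-detection", model="google/owlvit-base-patch32")
#     detector(
#         "http://images.cocodataset.org/val2017/000000039769.jpg",
#         candidate_labels=["cat", "remote control"],
#     )
#     # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]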
| 356
|
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
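# To make the warmup schedule concrete: with init_lr = 1e-3, warmup_steps = 100 and
# power = 1.0 (illustrative numbers), the warmup branch is a linear ramp:
#
#     step  10 -> 1e-3 * (10 / 100) ** 1.0 = 1e-4
#     step  50 -> 5e-4
#     step 100 -> 1e-3, after which decay_schedule_fn(step - 100) takes over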
def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay(Adam):
    def __init__(
        self,
        learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001,
        beta_1: float = 0.9,
        beta_2: float = 0.999,
        epsilon: float = 1e-7,
        amsgrad: bool = False,
        weight_decay_rate: float = 0.0,
        include_in_weight_decay: Optional[List[str]] = None,
        exclude_from_weight_decay: Optional[List[str]] = None,
        name: str = "AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator(object):
    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
| 127
| 0
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265, max_position_embeddings=512,
        encoder_layers=8, encoder_ffn_dim=2048, encoder_attention_heads=16,
        decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0,
        use_cache=True, is_encoder_decoder=True, activation_function="gelu",
        d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, decoder_start_token_id=1, scale_embedding=False,
        pad_token_id=0, bos_token_id=1, eos_token_id=2, forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
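# A short sketch of typical use; the overridden sizes are illustrative, not values
# this file prescribes:
#
#     config = BlenderbotSmallConfig(encoder_layers=2, decoder_layers=2, d_model=256)
#     config.num_attention_heads == config.encoder_attention_heads  # via attribute_map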
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
| 204
|
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
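# A worked example of the normalization above (hand-checked): a word box
# (10, 20, 110, 220) on a 1000 x 2000 pixel page maps onto the 0-1000 grid as
# normalize_box([10, 20, 110, 220], 1000, 2000) == [10, 10, 110, 110].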
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize(self, image: np.ndarray, size: Dict[str, int], resample=PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 204
| 1
|
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """Calculate the resonant frequency of an LC circuit."""
if inductance <= 0:
raise ValueError('Inductance cannot be 0 or negative' )
elif capacitance <= 0:
raise ValueError('Capacitance cannot be 0 or negative' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
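# Worked example (hand-checked): with L = 10 H and C = 5 F,
# f = 1 / (2 * pi * sqrt(10 * 5)) ~= 0.0225 Hz, so
# resonant_frequency(10, 5) == ("Resonant frequency", 0.022507907903927652)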
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369
|
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=12, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 145
| 0
|
'''simple docstring'''
def multiplicative_persistence(num: int) -> int:
    """Return the number of times digits must be multiplied together to reach one digit."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return the number of times digits must be summed together to reach one digit."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)
        steps += 1
    return steps
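# Hand-checked examples of both definitions:
#     multiplicative_persistence(39) == 3   # 39 -> 27 -> 14 -> 4
#     additive_persistence(199) == 3        # 199 -> 19 -> 10 -> 1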
if __name__ == "__main__":
import doctest
doctest.testmod()
| 139
|
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-character bit string to little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Format a non-negative integer as 8 hex digits in little-endian byte order."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Pad the message into a bit string whose length is a multiple of 512."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes):
    """Split the bit string into 512-bit blocks of 16 little-endian 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT on a 32-bit unsigned integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Left-rotate a 32-bit unsigned integer by `shift` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the 32-character hex MD5 digest of the message."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
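    # Cross-check of the restored md5_me against the standard library
    # (RFC 1321 test vectors; hashlib.md5 ships with CPython):
    import hashlib

    assert md5_me(b"") == hashlib.md5(b"").hexdigest().encode("utf-8")
    assert md5_me(b"abc") == hashlib.md5(b"abc").hexdigest().encode("utf-8")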
| 211
| 0
|
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(
        initial_probabilities, transition_probabilities, emission_probabilities)
def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]):
        raise ValueError("""There's an empty parameter""")
def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, """observations_space""")
    _validate_list(states_space, """states_space""")
def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)
def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, """initial_probabilities""", float)
    _validate_nested_dict(transition_probabilities, """transition_probabilities""")
    _validate_nested_dict(emission_probabilities, """emission_probabilities""")
def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)
def _validate_dict(
    _object: Any, var_name: str, value_type: type, nested: bool = False
) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = """nested dictionary """ if nested else """"""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
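    # A minimal usage check for the restored viterbi above (the classic
    # Healthy/Fever example; expected output: ['Healthy', 'Healthy', 'Fever']):
    print(
        viterbi(
            ["normal", "cold", "dizzy"],
            ["Healthy", "Fever"],
            {"Healthy": 0.6, "Fever": 0.4},
            {"Healthy": {"Healthy": 0.7, "Fever": 0.3}, "Fever": {"Healthy": 0.4, "Fever": 0.6}},
            {
                "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
                "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
            },
        )
    )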
| 285
|
def mf_knapsack(i: int, wt: list, val: list, j: int):
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val_new = mf_knapsack(i - 1, wt, val, j)
        else:
            val_new = max(
                mf_knapsack(i - 1, wt, val, j), mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1], )
        f[i][j] = val_new
    return f[i][j]
def knapsack(w: int, wt: list, val: list, n: int):
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w], dp
def knapsack_with_example_solution(w: int, wt: list, val: list):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            """Both the weights and values vectors must be either lists or tuples""")
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            """The number of weights must be the same as the number of values.\n"""
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                """All weights must be integers but got weight of """
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set
def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("""optimal_value = """, optimal_solution)
    print("""An optimal subset corresponding to the optimal value""", optimal_subset)
| 285
| 1
|
def is_palindrome(n) -> bool:
    '''simple docstring'''
    return str(n) == str(n)[::-1]
def sum_reverse(n) -> int:
    '''simple docstring'''
    return int(n) + int(str(n)[::-1])
def solution(limit: int = 1_00_00) -> int:
    '''simple docstring'''
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
print(F"{solution() = }")
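    # Sanity check for the restored solution: Project Euler 55 reports 249
    # Lychrel candidates below ten thousand.
    assert solution() == 249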
| 314
|
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    '''simple docstring'''
    word_bank = word_bank or []
    # create a table
    table_size = len(target) + 1
    table: list = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
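    # Quick check of the restored tabulation (each combination is reversed into
    # left-to-right order before being returned):
    assert all_construct("abc", ["ab", "abc", "c"]) == [["abc"], ["ab", "c"]]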
| 314
| 1
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 25_0004
RO_CODE = 25_0020
@require_sentencepiece
@require_tokenizers
class lowercase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase_ : Any =MBartaaTokenizer
lowercase_ : Dict =MBartaaTokenizerFast
lowercase_ : Tuple =True
lowercase_ : int =True
def A__ ( self):
super().setUp()
# We have a SentencePiece fixture for testing
lowercase = MBartaaTokenizer(UpperCamelCase__ ,src_lang='''en_XX''' ,tgt_lang='''ro_RO''' ,keep_accents=UpperCamelCase__)
tokenizer.save_pretrained(self.tmpdirname)
def A__ ( self):
lowercase = '''<s>'''
lowercase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__) ,UpperCamelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__) ,UpperCamelCase__)
def A__ ( self):
lowercase = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] ,'''<s>''')
self.assertEqual(vocab_keys[1] ,'''<pad>''')
self.assertEqual(vocab_keys[-1] ,'''<mask>''')
self.assertEqual(len(UpperCamelCase__) ,1_0_5_4)
def A__ ( self):
self.assertEqual(self.get_tokenizer().vocab_size ,1_0_5_4)
def A__ ( self):
lowercase = MBartaaTokenizer(UpperCamelCase__ ,src_lang='''en_XX''' ,tgt_lang='''ro_RO''' ,keep_accents=UpperCamelCase__)
lowercase = tokenizer.tokenize('''This is a test''')
self.assertListEqual(UpperCamelCase__ ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase__) ,[value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] ,)
lowercase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
UpperCamelCase__ ,[SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] ,)
lowercase = tokenizer.convert_tokens_to_ids(UpperCamelCase__)
self.assertListEqual(
UpperCamelCase__ ,[
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] ,)
lowercase = tokenizer.convert_ids_to_tokens(UpperCamelCase__)
self.assertListEqual(
UpperCamelCase__ ,[SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] ,)
@slow
def A__ ( self):
lowercase = {'''input_ids''': [[2_5_0_0_0_4, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [2_5_0_0_0_4, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_0_0_0_4, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ ,model_name='''facebook/mbart-large-50''' ,revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' ,)
def A__ ( self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowercase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
lowercase = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ ,**UpperCamelCase__)
lowercase = self.tokenizer_class.from_pretrained(UpperCamelCase__ ,**UpperCamelCase__)
lowercase = tempfile.mkdtemp()
lowercase = tokenizer_r.save_pretrained(UpperCamelCase__)
lowercase = tokenizer_p.save_pretrained(UpperCamelCase__)
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files))
lowercase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f)
self.assertSequenceEqual(UpperCamelCase__ ,UpperCamelCase__)
# Checks everything loads correctly in the same way
lowercase = tokenizer_r.from_pretrained(UpperCamelCase__)
lowercase = tokenizer_p.from_pretrained(UpperCamelCase__)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase__ ,UpperCamelCase__))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(UpperCamelCase__)
# Save tokenizer rust, legacy_format=True
lowercase = tempfile.mkdtemp()
lowercase = tokenizer_r.save_pretrained(UpperCamelCase__ ,legacy_format=UpperCamelCase__)
lowercase = tokenizer_p.save_pretrained(UpperCamelCase__)
# Checks it save with the same files
self.assertSequenceEqual(UpperCamelCase__ ,UpperCamelCase__)
# Checks everything loads correctly in the same way
lowercase = tokenizer_r.from_pretrained(UpperCamelCase__)
lowercase = tokenizer_p.from_pretrained(UpperCamelCase__)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase__ ,UpperCamelCase__))
shutil.rmtree(UpperCamelCase__)
# Save tokenizer rust, legacy_format=False
lowercase = tempfile.mkdtemp()
lowercase = tokenizer_r.save_pretrained(UpperCamelCase__ ,legacy_format=UpperCamelCase__)
lowercase = tokenizer_p.save_pretrained(UpperCamelCase__)
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
lowercase = tokenizer_r.from_pretrained(UpperCamelCase__)
lowercase = tokenizer_p.from_pretrained(UpperCamelCase__)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase__ ,UpperCamelCase__))
shutil.rmtree(UpperCamelCase__)
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
lowercase_ : List[str] ='''facebook/mbart-large-50-one-to-many-mmt'''
lowercase_ : int =[
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
lowercase_ : Optional[Any] =[
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
lowercase_ : Dict =[EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]
@classmethod
def A__ ( cls):
lowercase = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name ,src_lang='''en_XX''' ,tgt_lang='''ro_RO''')
lowercase = 1
return cls
def A__ ( self):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] ,2_5_0_0_0_1)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] ,2_5_0_0_0_4)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] ,2_5_0_0_2_0)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] ,2_5_0_0_3_8)
def A__ ( self):
lowercase = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens ,UpperCamelCase__)
def A__ ( self):
self.assertIn(UpperCamelCase__ ,self.tokenizer.all_special_ids)
lowercase = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
lowercase = self.tokenizer.decode(UpperCamelCase__ ,skip_special_tokens=UpperCamelCase__)
lowercase = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=UpperCamelCase__)
self.assertEqual(UpperCamelCase__ ,UpperCamelCase__)
self.assertNotIn(self.tokenizer.eos_token ,UpperCamelCase__)
def A__ ( self):
lowercase = ['''this is gunna be a long sentence ''' * 2_0]
assert isinstance(src_text[0] ,UpperCamelCase__)
lowercase = 1_0
lowercase = self.tokenizer(UpperCamelCase__ ,max_length=UpperCamelCase__ ,truncation=UpperCamelCase__).input_ids[0]
self.assertEqual(ids[0] ,UpperCamelCase__)
self.assertEqual(ids[-1] ,2)
self.assertEqual(len(UpperCamelCase__) ,UpperCamelCase__)
def A__ ( self):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR''']) ,[2_5_0_0_5_3, 2_5_0_0_0_1])
def A__ ( self):
lowercase = tempfile.mkdtemp()
lowercase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCamelCase__)
lowercase = MBartaaTokenizer.from_pretrained(UpperCamelCase__)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,UpperCamelCase__)
@require_torch
def A__ ( self):
lowercase = self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=UpperCamelCase__ ,return_tensors='''pt''')
lowercase = shift_tokens_right(batch['''labels'''] ,self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def A__ ( self):
lowercase = self.tokenizer(
self.src_text ,text_target=self.tgt_text ,padding=UpperCamelCase__ ,truncation=UpperCamelCase__ ,max_length=len(self.expected_src_tokens) ,return_tensors='''pt''' ,)
lowercase = shift_tokens_right(batch['''labels'''] ,self.tokenizer.pad_token_id)
self.assertIsInstance(UpperCamelCase__ ,UpperCamelCase__)
self.assertEqual((2, 1_4) ,batch.input_ids.shape)
self.assertEqual((2, 1_4) ,batch.attention_mask.shape)
lowercase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens ,UpperCamelCase__)
self.assertEqual(2 ,batch.decoder_input_ids[0, 0]) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens ,[EN_CODE])
self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id])
def A__ ( self):
lowercase = self.tokenizer(self.src_text ,padding=UpperCamelCase__ ,truncation=UpperCamelCase__ ,max_length=3 ,return_tensors='''pt''')
lowercase = self.tokenizer(
text_target=self.tgt_text ,padding=UpperCamelCase__ ,truncation=UpperCamelCase__ ,max_length=1_0 ,return_tensors='''pt''')
lowercase = targets['''input_ids''']
lowercase = shift_tokens_right(UpperCamelCase__ ,self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] ,3)
self.assertEqual(batch.decoder_input_ids.shape[1] ,1_0)
@require_torch
def A__ ( self):
lowercase = self.tokenizer._build_translation_inputs(
'''A test''' ,return_tensors='''pt''' ,src_lang='''en_XX''' ,tgt_lang='''ar_AR''')
self.assertEqual(
nested_simplify(UpperCamelCase__) ,{
# en_XX, A, test, EOS
'''input_ids''': [[2_5_0_0_0_4, 6_2, 3_0_3_4, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 2_5_0_0_0_1,
} ,)
| 367
|
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    '''simple docstring'''
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
        if determinant == 0:
            raise ValueError('''This matrix has no inverse.''')
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            ))
        if determinant == 0:
            raise ValueError('''This matrix has no inverse.''')
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)
        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''')
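# Cross-check of the routine above with numpy: for [[2, 5], [1, 3]] the
# determinant is 1, so the exact inverse is [[3, -5], [-1, 2]].
if __name__ == "__main__":
    import numpy as np

    assert np.allclose(
        np.array(inverse_of_matrix([[2.0, 5.0], [1.0, 3.0]])), [[3.0, -5.0], [-1.0, 2.0]]
    )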
| 97
| 0
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    '''simple docstring'''
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)
    def __call__(self):
        '''simple docstring'''
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
    def flatten(self):
        '''simple docstring'''
        from .features import Value
        return {k: Value('''string''') for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    '''simple docstring'''
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)
    def __post_init__(self):
        '''simple docstring'''
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None
    def __call__(self):
        '''simple docstring'''
        return pa.struct({'''language''': pa.list_(pa.string()), '''translation''': pa.list_(pa.string())})
    def encode_example(self, translation_dict):
        '''simple docstring'''
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                F'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).')
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])
        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
        return {"language": languages, "translation": translations}
    def flatten(self):
        '''simple docstring'''
        from .features import Sequence, Value
        return {
            "language": Sequence(Value('''string''')),
            "translation": Sequence(Value('''string''')),
        }
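# A small usage check of encode_example above: multi-valued translations are
# flattened to (lang, text) pairs and sorted by language code before being
# split back into parallel lists.
if __name__ == "__main__":
    example = {"en": "the cat", "fr": ["le chat", "la chatte"]}
    encoded = TranslationVariableLanguages(languages=["en", "fr"]).encode_example(example)
    assert list(encoded["language"]) == ["en", "fr", "fr"]
    assert list(encoded["translation"]) == ["the cat", "la chatte", "le chat"]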
| 91
|
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
_SCREAMING_SNAKE_CASE : List[str] = "hf-internal-testing/tiny-random-bert"
_SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
_SCREAMING_SNAKE_CASE : Optional[int] = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class A__ ( unittest.TestCase ):
"""simple docstring"""
def a_ ( self ):
snake_case = cached_file(__snake_case , __snake_case )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(__snake_case ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(__snake_case , __snake_case ) ) )
with open(os.path.join(__snake_case , '''refs''' , '''main''' ) ) as f:
snake_case = f.read()
self.assertEqual(__snake_case , os.path.join(__snake_case , '''snapshots''' , __snake_case , __snake_case ) )
self.assertTrue(os.path.isfile(__snake_case ) )
# File is cached at the same place the second time.
snake_case = cached_file(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
# Using a specific revision to test the full commit hash.
snake_case = cached_file(__snake_case , __snake_case , revision='''9b8c223''' )
self.assertEqual(__snake_case , os.path.join(__snake_case , '''snapshots''' , __snake_case , __snake_case ) )
def a_ ( self ):
with self.assertRaisesRegex(__snake_case , '''is not a valid model identifier''' ):
snake_case = cached_file('''tiny-random-bert''' , __snake_case )
with self.assertRaisesRegex(__snake_case , '''is not a valid git identifier''' ):
snake_case = cached_file(__snake_case , __snake_case , revision='''aaaa''' )
with self.assertRaisesRegex(__snake_case , '''does not appear to have a file named''' ):
snake_case = cached_file(__snake_case , '''conf''' )
def a_ ( self ):
with self.assertRaisesRegex(__snake_case , '''does not appear to have a file named''' ):
snake_case = cached_file(__snake_case , '''conf''' )
with open(os.path.join(__snake_case , '''refs''' , '''main''' ) ) as f:
snake_case = f.read()
self.assertTrue(os.path.isfile(os.path.join(__snake_case , '''.no_exist''' , __snake_case , '''conf''' ) ) )
snake_case = cached_file(__snake_case , '''conf''' , _raise_exceptions_for_missing_entries=__snake_case )
self.assertIsNone(__snake_case )
snake_case = cached_file(__snake_case , '''conf''' , local_files_only=__snake_case , _raise_exceptions_for_missing_entries=__snake_case )
self.assertIsNone(__snake_case )
snake_case = mock.Mock()
snake_case = 5_0_0
snake_case = {}
snake_case = HTTPError
snake_case = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=__snake_case ) as mock_head:
snake_case = cached_file(__snake_case , '''conf''' , _raise_exceptions_for_connection_errors=__snake_case )
self.assertIsNone(__snake_case )
# This check we did call the fake head request
mock_head.assert_called()
def a_ ( self ):
self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __snake_case ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __snake_case ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __snake_case ) )
def a_ ( self ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(__snake_case , '''is not a valid model identifier''' ):
get_file_from_repo('''bert-base-case''' , __snake_case )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(__snake_case , '''is not a valid git identifier''' ):
get_file_from_repo('''bert-base-cased''' , __snake_case , revision='''ahaha''' )
snake_case = get_file_from_repo('''bert-base-cased''' , __snake_case )
# The name is the cached name which is not very easy to test, so instead we load the content.
snake_case = json.loads(open(__snake_case , '''r''' ).read() )
self.assertEqual(config['''hidden_size'''] , 7_6_8 )
def a_ ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = Path(__snake_case ) / '''a.txt'''
filename.touch()
self.assertEqual(get_file_from_repo(__snake_case , '''a.txt''' ) , str(__snake_case ) )
self.assertIsNone(get_file_from_repo(__snake_case , '''b.txt''' ) )
| 127
| 0
|
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def lowerCAmelCase ():
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
__UpperCamelCase ="""__test_patch_submodule_mock__"""
with patch_submodule(_test_patching , '''os.path.join''' , _lowerCAmelCase ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def lowerCAmelCase ():
"""simple docstring"""
assert _test_patching.open is open
__UpperCamelCase ="""__test_patch_submodule_builtin_mock__"""
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , '''open''' , _lowerCAmelCase ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def lowerCAmelCase ():
"""simple docstring"""
__UpperCamelCase ="""__test_patch_submodule_missing_mock__"""
with patch_submodule(_test_patching , '''pandas.read_csv''' , _lowerCAmelCase ):
pass
def lowerCAmelCase ():
"""simple docstring"""
__UpperCamelCase ="""__test_patch_submodule_missing_builtin_mock__"""
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , '''len''' , _lowerCAmelCase ) is None
with patch_submodule(_test_patching , '''len''' , _lowerCAmelCase ):
assert _test_patching.len is mock
assert _test_patching.len is len
def lowerCAmelCase ():
"""simple docstring"""
__UpperCamelCase ="""__test_patch_submodule_start_and_stop_mock__"""
__UpperCamelCase =patch_submodule(_test_patching , '''open''' , _lowerCAmelCase )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def lowerCAmelCase ():
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
__UpperCamelCase ="""__test_patch_submodule_successive_join__"""
__UpperCamelCase ="""__test_patch_submodule_successive_dirname__"""
__UpperCamelCase ="""__test_patch_submodule_successive_rename__"""
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching , '''os.path.join''' , mock_join ):
        with patch_submodule(_test_patching , '''os.rename''' , mock_rename ):
            with patch_submodule(_test_patching , '''os.path.dirname''' , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching , '''os.rename''' , mock_rename ):
        with patch_submodule(_test_patching , '''os.path.join''' , mock_join ):
            with patch_submodule(_test_patching , '''os.path.dirname''' , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def lowerCAmelCase ():
"""simple docstring"""
__UpperCamelCase ="""__test_patch_submodule_doesnt_exist_mock__"""
with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , _lowerCAmelCase ):
pass
with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , _lowerCAmelCase ):
pass
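# For comparison, the standard library offers the same swap-for-the-duration
# pattern via unittest.mock.patch (a minimal sketch, separate from the datasets
# patch_submodule helper under test above):
from unittest import mock as _mock
import os.path as _os_path

with _mock.patch("os.path.join", lambda *parts: "/".join(parts)):
    assert _os_path.join("a", "b") == "a/b"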
| 371
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_gpt_neox_fast'''] = ['''GPTNeoXTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_gpt_neox'''] = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
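# The _LazyModule call above defers the heavy torch/tokenizers imports until an
# attribute is first accessed. A dependency-free sketch of the same idea uses a
# PEP 562 module-level __getattr__ (the mapping below is illustrative only and
# is shadowed here by the _LazyModule swap above):
_LAZY_TARGETS = {"heavy_helper": "json"}  # attribute name -> module imported on demand

def __getattr__(name):
    if name in _LAZY_TARGETS:
        import importlib
        return importlib.import_module(_LAZY_TARGETS[name])
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")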
| 85
| 0
|
"""simple docstring"""
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
# a prime number
p = 7_0_1
a = 1_0_0_0_0_0_0_0_0_0
b = 1_0
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
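# An equivalent iterative form (square-and-multiply), also O(log n); Python's
# built-in three-argument pow implements the same operation in C:
def binary_exponentiation_iterative(base: int, n: int, mod: int) -> int:
    result = 1
    base %= mod
    while n > 0:
        if n & 1:  # odd exponent: fold one factor of the base into the result
            result = (result * base) % mod
        base = (base * base) % mod  # square the base
        n >>= 1  # halve the exponent
    return result

assert binary_exponentiation_iterative(b, p - 2, p) == pow(b, p - 2, p)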
| 242
|
'''simple docstring'''
def solution(length: int = 50) -> int:
    # ways_number[k] counts the fillings of a row of length k with red blocks of
    # length >= 3, any two blocks separated by at least one unit (a Project Euler
    # 114-style recurrence); the row may also be left entirely empty, hence the 1s.
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            # leftmost block starts at block_start; everything before it is empty,
            # one separator unit follows, and the remainder is filled recursively
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            # plus the arrangement where this block ends flush with the right edge
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
print(f'{solution() = }')
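    # Cross-check of the tabulated recurrence with a direct recursive count
    # (Project Euler 114 gives 17 arrangements for a row of length 7):
    def ways_bruteforce(n: int) -> int:
        if n == 0:
            return 1
        total = ways_bruteforce(n - 1)  # leftmost unit stays empty
        for block in range(3, n + 1):  # or a red block starts at the left edge
            total += 1 if block == n else ways_bruteforce(n - block - 1)
        return total

    assert ways_bruteforce(7) == 17 == solution(7)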
| 145
| 0
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def UpperCamelCase__ ( self ) -> int:
A = tempfile.mkdtemp()
A = 8
# DPR tok
A = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
A = os.path.join(self.tmpdirname ,"""dpr_tokenizer""" )
os.makedirs(lowerCamelCase_ ,exist_ok=lowerCamelCase_ )
A = os.path.join(lowerCamelCase_ ,DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
A = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
A = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) )
A = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
A = {"""unk_token""": """<unk>"""}
A = os.path.join(self.tmpdirname ,"""bart_tokenizer""" )
os.makedirs(lowerCamelCase_ ,exist_ok=lowerCamelCase_ )
A = os.path.join(lowerCamelCase_ ,BART_VOCAB_FILES_NAMES["""vocab_file"""] )
A = os.path.join(lowerCamelCase_ ,BART_VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCamelCase_ ) )
def UpperCamelCase__ ( self ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""dpr_tokenizer""" ) )
def UpperCamelCase__ ( self ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""bart_tokenizer""" ) )
def UpperCamelCase__ ( self ) -> Any:
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def UpperCamelCase__ ( self ) -> List[Any]:
A = os.path.join(self.tmpdirname ,"""rag_tokenizer""" )
A = RagConfig(question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() )
A = RagTokenizer(question_encoder=self.get_dpr_tokenizer() ,generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(lowerCamelCase_ )
rag_tokenizer.save_pretrained(lowerCamelCase_ )
A = RagTokenizer.from_pretrained(lowerCamelCase_ ,config=lowerCamelCase_ )
self.assertIsInstance(new_rag_tokenizer.question_encoder ,lowerCamelCase_ )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() ,rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator ,lowerCamelCase_ )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() ,rag_tokenizer.generator.get_vocab() )
@slow
def UpperCamelCase__ ( self ) -> Union[str, Any]:
A = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" )
A = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
A = tokenizer(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@slow
def UpperCamelCase__ ( self ) -> Tuple:
A = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" )
A = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
A = tokenizer(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
| 77
|
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(fnc: Callable[[int | float], int | float], x_start: int | float, x_end: int | float, steps: int = 1_0_0, ) -> float:
    """simple docstring"""
    xa = x_start
    fxa = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        xa_next = (x_end - x_start) / steps + xa
        fxa_next = fnc(xa_next)
        length += math.hypot(xa_next - xa, fxa_next - fxa)
        # Increment step
        xa = xa_next
        fxa = fxa_next
    return length
if __name__ == "__main__":
    def f(x: float) -> float:
        """simple docstring"""
        return math.sin(1_0 * x)
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
UpperCAmelCase =10
while i <= 100_000:
print(f"""With {i} steps: {line_length(f, -10, 10, i)}""")
i *= 10
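    # Closed-form check: the straight line y = x from 0 to 1 has length sqrt(2).
    assert abs(line_length(lambda x: x, 0.0, 1.0) - math.sqrt(2)) < 1e-9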
| 77
| 1
|
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    '''simple docstring'''
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
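    # Quick check of the restored function: 360 = 2**3 * 3**2 * 5.
    assert prime_factors(360) == [2, 2, 2, 3, 3, 5]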
| 285
|
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    '''simple docstring'''
    name_a = a.name
    name_b = b.name
    a.name = ''
    b.name = ''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    '''simple docstring'''
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    '''simple docstring'''
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    '''simple docstring'''
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    '''simple docstring'''
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4  # float32
                elif dtype == 6:
                    mem_size *= 4  # int32
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8  # int64 / double
                else:
                    print('unexpected data type: ', dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print('total reduced size: ', total_reduced_size / 1024 / 1024 / 1024, 'GB')
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = 'optimized_' + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)
    return new_model_path
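# The dedup core above, sketched with plain numpy arrays (no onnx dependency;
# helper name and example data are illustrative only):
def _find_duplicates_sketch(arrays):
    seen = {}  # content signature -> index of first occurrence
    replacements = {}  # duplicate index -> canonical index
    for idx, arr in enumerate(arrays):
        key = (arr.dtype.str, arr.shape, arr.tobytes())
        if key in seen:
            replacements[idx] = seen[key]
        else:
            seen[key] = idx
    return replacements

if __name__ == "__main__":
    assert _find_duplicates_sketch([numpy.ones(4), numpy.zeros(4), numpy.ones(4)]) == {2: 0}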
| 285
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_time_series_transformer'''] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 371
|
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set:
    explored, stack = set(), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
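    # All seven vertices are reachable from "A" in the example graph above:
    assert depth_first_search(G, '''A''') == {"A", "B", "C", "D", "E", "F", "G"}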
| 22
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
@is_pt_tf_cross_test
class __A ( unittest.TestCase ):
"""simple docstring"""
    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
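
# Usage sketch (not part of the test file): the cross-framework round trip the tests
# above exercise, using the same public "bert-base-uncased" checkpoint. Both calls
# convert weights on the fly, so they need network access and both frameworks installed.
#
# tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)
# pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)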
| 81
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 97
| 0
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''), up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''), )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=3, )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type='''numpy''').images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type='''numpy''', return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1E-2 if torch_device != '''mps''' else 3E-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained('''CompVis/ldm-celebahq-256''')
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type='''numpy''').images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1E-2 if torch_device != '''mps''' else 3E-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
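
# A minimal, self-contained demonstration (added for illustration) of why the tests
# above re-create the generator before each pipeline call: reseeding reproduces the
# exact same noise, which makes the two outputs comparable element-wise.
if __name__ == "__main__":
    first = torch.randn(3, generator=torch.manual_seed(0))
    second = torch.randn(3, generator=torch.manual_seed(0))
    assert torch.equal(first, second)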
| 35
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_xlm_roberta""": [
"""XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaConfig""",
"""XLMRobertaOnnxConfig""",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_xlm_roberta"""] = ["""XLMRobertaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_xlm_roberta_fast"""] = ["""XLMRobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_xlm_roberta"""] = [
"""XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaForCausalLM""",
"""XLMRobertaForMaskedLM""",
"""XLMRobertaForMultipleChoice""",
"""XLMRobertaForQuestionAnswering""",
"""XLMRobertaForSequenceClassification""",
"""XLMRobertaForTokenClassification""",
"""XLMRobertaModel""",
"""XLMRobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_xlm_roberta"""] = [
"""TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMRobertaForCausalLM""",
"""TFXLMRobertaForMaskedLM""",
"""TFXLMRobertaForMultipleChoice""",
"""TFXLMRobertaForQuestionAnswering""",
"""TFXLMRobertaForSequenceClassification""",
"""TFXLMRobertaForTokenClassification""",
"""TFXLMRobertaModel""",
"""TFXLMRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_xlm_roberta"""] = [
"""FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxXLMRobertaForMaskedLM""",
"""FlaxXLMRobertaForCausalLM""",
"""FlaxXLMRobertaForMultipleChoice""",
"""FlaxXLMRobertaForQuestionAnswering""",
"""FlaxXLMRobertaForSequenceClassification""",
"""FlaxXLMRobertaForTokenClassification""",
"""FlaxXLMRobertaModel""",
"""FlaxXLMRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 35
| 1
|
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd(n) -> None:
    """Print the upper half of the diamond."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(""" """, end="""""")
        for _ in range(0, i + 1):  # printing stars
            print("""* """, end="""""")
        print()


def reverse_floyd(n) -> None:
    """Print the lower half of the diamond."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("""* """, end="""""")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(""" """, end="""""")


def pretty_print(n) -> None:
    """Print the full diamond, or a message for non-positive input."""
    if n <= 0:
        print(""" ... .... nothing printing :(""")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r'''| /\ | |- | |- |--| |\ /| |-''')
    print(r'''|/ \| |- |_ |_ |__| | \/ | |_''')
    K = 1
    while K:
        user_number = int(input('''enter the number and , and see the magic : '''))
        print()
        pretty_print(user_number)
        K = int(input('''press 0 to exit... and 1 to continue...'''))
print('''Good Bye...''')
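
# Expected output (derived from the functions above) for pretty_print(3), shown as a
# comment so the interactive loop stays the only entry point:
#
#   *
#  * *
# * * *
# * * *
#  * *
#   *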
| 41
|
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False

try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """A CLI menu to select a choice from a list of choices using the keyboard."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        """Prints the choice at the given index, with an arrow on the active row."""
        if index == self.position:
            forceWrite(F' {self.arrow_char} ')
            self.write_choice(index)
        else:
            forceWrite(F'    {self.choices[index]}')
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Moves the selection up or down and reprints the affected rows."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        """Starts the menu loop and returns the index of the selected choice."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
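
# Usage sketch (not part of the original module): driving the menu from a terminal.
# The prompt and choices below are placeholders.
#
# menu = BulletMenu("Which framework do you want to configure?", ["pytorch", "tensorflow"])
# selected_index = menu.run(default_choice=0)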
| 85
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/xglm-564M': 2048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("""additional_special_tokens""", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, """sp_model_kwargs"""):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = """""".join(tokens).replace(SPIECE_UNDERLINE, """ """).strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, """wb""") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
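
# Usage sketch (not from the original file): assuming a sentencepiece model file is
# available locally, encoding and decoding round-trip through the tokenizer above.
# The path is a placeholder.
#
# tokenizer = XGLMTokenizer("sentencepiece.bpe.model")
# ids = tokenizer("Hello world")["input_ids"]
# print(tokenizer.decode(ids))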
| 361
|
'''simple docstring'''
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the paths from the top-left to the bottom-right corner of `grid`,
    moving in the four cardinal directions without revisiting a cell; cells
    equal to 1 are blocked."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
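
# A small, self-contained check of the backtracking counter above: a 2x2 grid of
# zeros has exactly two non-revisiting routes from (0, 0) to (1, 1).
assert depth_first_search([[0, 0], [0, 0]], 0, 0, set()) == 2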
| 61
| 0
|
"""simple docstring"""
from collections import defaultdict
from math import gcd
def solution(limit: int = 1500000) -> int:
    '''
    Return how many perimeters below `limit` are the perimeter of exactly one
    integer-sided right triangle, generating primitive triples with Euclid's formula.
    '''
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(f'''{solution() = }''')
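
# Worked check (restating Euclid's formula used above): coprime m > n of opposite
# parity generate the primitive triple a = m^2 - n^2, b = 2mn, c = m^2 + n^2, whose
# perimeter simplifies to 2*m*(m + n). For m=2, n=1 that is the (3, 4, 5) triangle.
m, n = 2, 1
a, b, c = m * m - n * n, 2 * m * n, m * m + n * n
assert (a, b, c) == (3, 4, 5) and a + b + c == 2 * m * (m + n) == 12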
| 77
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
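
# Usage sketch (not part of the original file): the defaults above mirror the base
# architecture referenced in the archive map, so a no-argument config reproduces them.
#
# config = MgpstrConfig()
# print(config.hidden_size, config.max_token_length)  # 768 27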
| 77
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_trocr"""] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 289
|
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a pair of train/eval `DataLoader`s for GLUE MRPC, tokenized with "bert-base-cased".
    """
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt', )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
                metric.add_batch(
                    predictions=predictions, references=references, )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.', )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
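
# A minimal, self-contained sketch (a simplified stand-in, not the library's actual
# implementation) of the retry behaviour `find_executable_batch_size` provides:
# halve the batch size and retry whenever the wrapped function fails.
def toy_find_executable_batch_size(function, starting_batch_size=128):
    def wrapper():
        batch_size = starting_batch_size
        while batch_size > 0:
            try:
                return function(batch_size)
            except RuntimeError:  # the real decorator matches CUDA out-of-memory errors
                batch_size //= 2
        raise RuntimeError("No executable batch size found.")

    return wrapper


def _demo(batch_size):
    if batch_size > 16:
        raise RuntimeError("out of memory (simulated)")
    return batch_size


assert toy_find_executable_batch_size(_demo, starting_batch_size=64)() == 16  # 64 -> 32 -> 16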
| 289
| 1
|
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_try_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_try_incompatible_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_try_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_try_incompatible_extension_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects) as mock_cast_to_python_objects:
            _ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])
def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}])
def test_write(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


def test_write_with_features():
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)
@pytest.mark.parametrize("writer_batch_size" ,[None, 1, 10] )
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = pa.BufferOutputStream()
with ArrowWriter(
stream=__lowercase ,writer_batch_size=__lowercase ,hash_salt="split_name" ,check_duplicates=__lowercase ,) as writer:
with pytest.raises(__lowercase ):
writer.write({"col_1": "foo", "col_2": 1} ,key=[1, 2] )
A_ , A_ = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" ,[None, 2, 10] )
def __snake_case ( __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = pa.BufferOutputStream()
with ArrowWriter(
stream=__lowercase ,writer_batch_size=__lowercase ,hash_salt="split_name" ,check_duplicates=__lowercase ,) as writer:
with pytest.raises(__lowercase ):
writer.write({"col_1": "foo", "col_2": 1} ,key=10 )
writer.write({"col_1": "bar", "col_2": 2} ,key=10 )
A_ , A_ = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" ,[None, 2, 10] )
def __snake_case ( __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ = pa.BufferOutputStream()
with ArrowWriter(
stream=__lowercase ,writer_batch_size=__lowercase ,hash_salt="split_name" ,check_duplicates=__lowercase ,) as writer:
writer.write({"col_1": "foo", "col_2": 1} ,key=1 )
writer.write({"col_1": "bar", "col_2": 2} ,key=2 )
A_ , A_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" ,[None, 1, 10] )
@pytest.mark.parametrize(
"fields" ,[None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[int] ):
"""simple docstring"""
A_ = pa.BufferOutputStream()
A_ = pa.schema(__lowercase ) if fields else None
with ArrowWriter(stream=__lowercase ,schema=__lowercase ,writer_batch_size=__lowercase ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
A_ , A_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
A_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(__lowercase ,metadata=writer._schema.metadata )
_check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" ,[None, 1, 10] )
@pytest.mark.parametrize(
"fields" ,[None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Any ):
"""simple docstring"""
A_ = pa.BufferOutputStream()
A_ = pa.schema(__lowercase ) if fields else None
with ArrowWriter(stream=__lowercase ,schema=__lowercase ,writer_batch_size=__lowercase ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
A_ , A_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
A_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(__lowercase ,metadata=writer._schema.metadata )
_check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" ,[None, 1, 10] )
@pytest.mark.parametrize(
"fields" ,[None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Tuple ):
"""simple docstring"""
A_ = pa.BufferOutputStream()
A_ = pa.schema(__lowercase ) if fields else None
with ArrowWriter(stream=__lowercase ,schema=__lowercase ,writer_batch_size=__lowercase ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
A_ , A_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
A_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(__lowercase ,metadata=writer._schema.metadata )
_check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __snake_case ( ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
A_ = {"col_1": pa.string(), "col_2": pa.intaa()}
A_ = os.path.join(__lowercase ,"test.arrow" )
with ArrowWriter(path=__lowercase ,schema=pa.schema(__lowercase ) ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
A_ , A_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(__lowercase ,metadata=writer._schema.metadata )
_check_output(__lowercase ,1 )
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
if pa.types.is_list(__lowercase ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
if isinstance(lst[0] ,__lowercase ):
change_first_primitive_element_in_list(lst[0] ,__lowercase )
else:
A_ = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" ,[(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" ,[[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : str ,__UpperCamelCase : Dict ):
"""simple docstring"""
A_ = pa.array(TypedSequence(__lowercase ,optimized_int_type=__lowercase ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype" ,[
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
] ,)
@pytest.mark.parametrize("sequence" ,[[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : List[Any] ,__UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = pa.array(OptimizedTypedSequence(__lowercase ,col=__lowercase ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
A_ = copy.deepcopy(__lowercase )
A_ = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(__lowercase ,__lowercase )
A_ = pa.array(OptimizedTypedSequence(__lowercase ,col=__lowercase ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("raise_exception" ,[False, True] )
def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : Tuple ):
"""simple docstring"""
A_ = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=__lowercase ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
A_ = "mock://dataset-train.arrow"
with ArrowWriter(path=__lowercase ,storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs ,type(__lowercase ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
A_ , A_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(__lowercase )
def __snake_case ( ):
"""simple docstring"""
A_ = pa.BufferOutputStream()
with ParquetWriter(stream=__lowercase ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
A_ , A_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
A_ = pa.BufferReader(output.getvalue() )
A_ = pq.read_table(__lowercase )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" ,[False, True] )
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ):
"""simple docstring"""
import PIL.Image
A_ = str(tmp_path / "test_image_rgb.jpg" )
PIL.Image.fromarray(np.zeros((5, 5) ,dtype=np.uinta ) ).save(__lowercase ,format="png" )
A_ = pa.BufferOutputStream()
with ParquetWriter(
stream=__lowercase ,features=Features({"image": Image()} ) ,embed_local_files=__lowercase ) as writer:
writer.write({"image": image_path} )
writer.finalize()
A_ = pa.BufferReader(output.getvalue() )
A_ = pq.read_table(__lowercase )
A_ = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"] ,__lowercase )
with open(__lowercase ,"rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def __snake_case ( ):
"""simple docstring"""
A_ = pa.schema([pa.field("col_1" ,pa.string() ,nullable=__lowercase )] )
A_ = pa.BufferOutputStream()
with ArrowWriter(stream=__lowercase ) as writer:
writer._build_writer(inferred_schema=__lowercase )
assert writer._schema == pa.schema([pa.field("col_1" ,pa.string() )] )
| 312
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
SPIECE_UNDERLINE = '''▁'''
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES
_lowerCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : int = AlbertTokenizer
def __init__( self : Optional[Any] , snake_case_ : Optional[Any]=None , snake_case_ : Optional[Any]=None , snake_case_ : Optional[Any]=True , snake_case_ : str=True , snake_case_ : Tuple=False , snake_case_ : List[Any]="[CLS]" , snake_case_ : Union[str, Any]="[SEP]" , snake_case_ : str="<unk>" , snake_case_ : Union[str, Any]="[SEP]" , snake_case_ : List[Any]="<pad>" , snake_case_ : List[str]="[CLS]" , snake_case_ : int="[MASK]" , **snake_case_ : Any , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_UpperCAmelCase = (
AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ , normalized=snake_case_ )
if isinstance(snake_case_ , snake_case_ )
else mask_token
)
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , remove_space=snake_case_ , keep_accents=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , **snake_case_ , )
_UpperCAmelCase = do_lower_case
_UpperCAmelCase = remove_space
_UpperCAmelCase = keep_accents
_UpperCAmelCase = vocab_file
_UpperCAmelCase = False if not self.vocab_file else True
def lowercase ( self : Union[str, Any] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowercase ( self : Dict , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase ( self : Optional[Any] , snake_case_ : str , snake_case_ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(snake_case_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCAmelCase = os.path.join(
snake_case_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ):
copyfile(self.vocab_file , snake_case_ )
return (out_vocab_file,)
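# Illustrative sketch, not part of the original file: the two sequence-pair
# helpers above produce the ALBERT layout [CLS] A [SEP] (+ B [SEP]) with
# token_type_ids 0 for the first segment and 1 for the second. A standalone
# mirror with assumed ids cls=2, sep=3:
def _albert_layout_example(ids_a, ids_b=None, cls_id=2, sep_id=3):
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id], [0] * (len(ids_a) + 2)
    input_ids = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
    token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
    return input_ids, token_type_ids
# _albert_layout_example([5, 6], [7]) -> ([2, 5, 6, 3, 7, 3], [0, 0, 0, 0, 1, 1])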
| 22
| 0
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
lowerCamelCase_ = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
lowerCamelCase_ = {'''facebook/blenderbot-3B''': 1_28}
class __A( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE__ = BlenderbotTokenizer
def __init__(self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_="replace" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="<mask>" , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ):
super().__init__(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , errors=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
UpperCamelCase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , SCREAMING_SNAKE_CASE_ ) != add_prefix_space:
UpperCamelCase__ = getattr(SCREAMING_SNAKE_CASE_ , pre_tok_state.pop("""type""" ) )
UpperCamelCase__ = add_prefix_space
UpperCamelCase__ = pre_tok_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = add_prefix_space
UpperCamelCase__ = """post_processor"""
UpperCamelCase__ = getattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if tokenizer_component_instance:
UpperCamelCase__ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the `post_processor_class` object
if "sep" in state:
UpperCamelCase__ = tuple(state["""sep"""] )
if "cls" in state:
UpperCamelCase__ = tuple(state["""cls"""] )
UpperCamelCase__ = False
if state.get("""add_prefix_space""" , SCREAMING_SNAKE_CASE_ ) != add_prefix_space:
UpperCamelCase__ = add_prefix_space
UpperCamelCase__ = True
if state.get("""trim_offsets""" , SCREAMING_SNAKE_CASE_ ) != trim_offsets:
UpperCamelCase__ = trim_offsets
UpperCamelCase__ = True
if changes_to_apply:
UpperCamelCase__ = getattr(SCREAMING_SNAKE_CASE_ , state.pop("""type""" ) )
UpperCamelCase__ = component_class(**SCREAMING_SNAKE_CASE_ )
setattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def UpperCAmelCase_ (self ):
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else value
UpperCamelCase__ = value
def UpperCAmelCase_ (self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = kwargs.get("""is_split_into_words""" , SCREAMING_SNAKE_CASE_ )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = kwargs.get("""is_split_into_words""" , SCREAMING_SNAKE_CASE_ )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
UpperCamelCase__ = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
UpperCamelCase__ = [self.sep_token_id]
UpperCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
return token_ids_a + [self.eos_token_id]
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to prefix user turns with a space, as is done within Blenderbot
inputs.append(""" """ + text )
else:
# Generated responses already contain the space prefix.
inputs.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = """ """.join(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.encode(SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > self.model_max_length:
UpperCamelCase__ = input_ids[-self.model_max_length :]
logger.warning(F"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
return input_ids
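# Illustrative sketch, not part of the original file: Blenderbot only appends
# EOS (no BOS/CLS), and the conversation builder above keeps the most recent
# tokens when the encoded history overflows model_max_length. Standalone
# mirror with an assumed eos_id=2:
def _blenderbot_truncation_example(token_ids, eos_id=2, model_max_length=128):
    input_ids = token_ids + [eos_id]
    if len(input_ids) > model_max_length:
        input_ids = input_ids[-model_max_length:]  # drop the oldest context
    return input_ids
# _blenderbot_truncation_example(list(range(5)), model_max_length=4) -> [2, 3, 4, 2]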
| 354
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
def __magic_name__ ( __a : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ = R"""\w+[.]\d+"""
UpperCamelCase__ = re.findall(__a , __a )
for pat in pats:
UpperCamelCase__ = key.replace(__a , """_""".join(pat.split(""".""" ) ) )
return key
def __magic_name__ ( __a : str , __a : Dict , __a : int ):
'''simple docstring'''
UpperCamelCase__ = pt_tuple_key[:-1] + ("""scale""",)
if (
any("""norm""" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
UpperCamelCase__ = pt_tuple_key[:-1] + ("""scale""",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
UpperCamelCase__ = pt_tuple_key[:-1] + ("""scale""",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
UpperCamelCase__ = pt_tuple_key[:-1] + ("""embedding""",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
UpperCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight":
UpperCamelCase__ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCamelCase__ = pt_tuple_key[:-1] + ("""weight""",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCamelCase__ = pt_tuple_key[:-1] + ("""bias""",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
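# Illustrative check, not part of the original file: the reshapes above encode
# the PyTorch -> Flax layout conventions. Conv kernels go from (out, in, kh, kw)
# to (kh, kw, in, out) and linear weights are plain-transposed:
import numpy as np
_conv_w = np.zeros((8, 3, 5, 5))          # PyTorch OIHW
assert _conv_w.transpose(2, 3, 1, 0).shape == (5, 5, 3, 8)  # Flax HWIO
_linear_w = np.zeros((16, 4))             # PyTorch (out, in)
assert _linear_w.T.shape == (4, 16)       # Flax kernel (in, out)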
def __magic_name__ ( __a : List[Any] , __a : List[Any] , __a : Optional[int]=42 ):
'''simple docstring'''
# Step 1: Convert the PyTorch state dict tensors to numpy arrays
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
UpperCamelCase__ = flax_model.init_weights(PRNGKey(__a ) )
UpperCamelCase__ = flatten_dict(__a )
UpperCamelCase__ = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = rename_key(__a )
UpperCamelCase__ = tuple(renamed_pt_key.split(""".""" ) )
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(__a , __a , __a )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# also add unexpected weights so that a warning is thrown
UpperCamelCase__ = jnp.asarray(__a )
return unflatten_dict(__a )
| 178
| 0
|
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
__a = {
"facebook/maskformer-swin-base-ade": (
"https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
__a = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
lowercase = "maskformer"
lowercase = {"hidden_size": "mask_feature_size"}
lowercase = ["resnet", "swin"]
lowercase = ["detr"]
def __init__( self : Any , snake_case_ : int = 256 , snake_case_ : int = 256 , snake_case_ : float = 0.1 , snake_case_ : bool = False , snake_case_ : Optional[Dict] = None , snake_case_ : Optional[Dict] = None , snake_case_ : float = 0.02 , snake_case_ : float = 1.0 , snake_case_ : float = 1.0 , snake_case_ : float = 1.0 , snake_case_ : float = 20.0 , snake_case_ : Optional[bool] = None , **snake_case_ : int , ):
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
snake_case__ : Optional[Any] = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
if isinstance(snake_case_ , snake_case_ ):
snake_case__ : Dict = backbone_config.pop("""model_type""" )
snake_case__ : int = CONFIG_MAPPING[backbone_model_type]
snake_case__ : Optional[int] = config_class.from_dict(snake_case_ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
f"Supported model types: {','.join(self.backbones_supported )}" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
snake_case__ : Optional[Any] = DetrConfig()
else:
# verify that the decoder is supported
snake_case__ : List[str] = (
decoder_config.pop("""model_type""" ) if isinstance(snake_case_ , snake_case_ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f"Transformer Decoder {decoder_type} not supported, please use one of"
f" {','.join(self.decoders_supported )}" )
if isinstance(snake_case_ , snake_case_ ):
snake_case__ : Union[str, Any] = CONFIG_MAPPING[decoder_type]
snake_case__ : Union[str, Any] = config_class.from_dict(snake_case_ )
snake_case__ : Any = backbone_config
snake_case__ : Optional[int] = decoder_config
# main feature dimension for the model
snake_case__ : int = fpn_feature_size
snake_case__ : Any = mask_feature_size
# initializer
snake_case__ : str = init_std
snake_case__ : Optional[Any] = init_xavier_std
# Hungarian matcher && loss
snake_case__ : Optional[int] = cross_entropy_weight
snake_case__ : Dict = dice_weight
snake_case__ : List[str] = mask_weight
snake_case__ : Tuple = use_auxiliary_loss
snake_case__ : Any = no_object_weight
snake_case__ : List[str] = output_auxiliary_logits
snake_case__ : int = self.decoder_config.encoder_attention_heads
snake_case__ : str = self.decoder_config.num_hidden_layers
super().__init__(**snake_case_ )
@classmethod
def lowerCamelCase ( cls : int , snake_case_ : PretrainedConfig , snake_case_ : PretrainedConfig , **snake_case_ : int ):
return cls(
backbone_config=snake_case_ , decoder_config=snake_case_ , **snake_case_ , )
def lowerCamelCase ( self : Dict ):
snake_case__ : str = copy.deepcopy(self.__dict__ )
snake_case__ : int = self.backbone_config.to_dict()
snake_case__ : List[Any] = self.decoder_config.to_dict()
snake_case__ : int = self.__class__.model_type
return output
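# Illustrative sketch, not part of the original file: __init__ above re-hydrates
# nested configs from dicts through a model_type registry (CONFIG_MAPPING). The
# same lookup in miniature, with a purely hypothetical registry:
_EXAMPLE_REGISTRY = {"swin": dict, "detr": dict}
def _rehydrate_example(cfg):
    if isinstance(cfg, dict):
        cfg = dict(cfg)
        return _EXAMPLE_REGISTRY[cfg.pop("model_type")](cfg)
    return cfg  # already a config object
# _rehydrate_example({"model_type": "swin", "depth": 2}) -> {"depth": 2}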
| 35
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def __snake_case( _lowerCAmelCase , _lowerCAmelCase=False ) -> str:
snake_case__ : Union[str, Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
snake_case__ : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> Union[str, Any]:
for i in range(config.num_hidden_layers ):
if base_model:
snake_case__ : Tuple = """"""
else:
snake_case__ : Dict = """deit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case__ : Optional[Any] = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
snake_case__ : Tuple = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
snake_case__ : Any = in_proj_weight[
: config.hidden_size, :
]
snake_case__ : Optional[int] = in_proj_bias[: config.hidden_size]
snake_case__ : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case__ : str = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case__ : List[str] = in_proj_weight[
-config.hidden_size :, :
]
snake_case__ : Tuple = in_proj_bias[-config.hidden_size :]
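# Illustrative check, not part of the original file: timm stores attention as a
# single fused qkv projection of shape (3 * hidden, hidden); the slices above
# carve it into the separate q, k and v matrices HuggingFace expects:
import numpy as np
_hidden = 4
_qkv = np.arange(3 * _hidden * _hidden).reshape(3 * _hidden, _hidden)
_q, _k, _v = _qkv[:_hidden], _qkv[_hidden : 2 * _hidden], _qkv[-_hidden:]
assert _q.shape == _k.shape == _v.shape == (_hidden, _hidden)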
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int:
snake_case__ : str = dct.pop(_lowerCAmelCase )
snake_case__ : Tuple = val
def __snake_case( ) -> Tuple:
snake_case__ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case__ : Optional[int] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> str:
snake_case__ : Optional[int] = DeiTConfig()
# all deit models have fine-tuned heads
snake_case__ : Union[str, Any] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
snake_case__ : int = 1_000
snake_case__ : Any = """huggingface/label-files"""
snake_case__ : Optional[Any] = """imagenet-1k-id2label.json"""
snake_case__ : Tuple = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
snake_case__ : List[Any] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
snake_case__ : List[Any] = idalabel
snake_case__ : List[str] = {v: k for k, v in idalabel.items()}
snake_case__ : Tuple = int(deit_name[-6:-4] )
snake_case__ : Optional[Any] = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("""tiny""" ):
snake_case__ : Tuple = 192
snake_case__ : Union[str, Any] = 768
snake_case__ : Tuple = 12
snake_case__ : Union[str, Any] = 3
elif deit_name[9:].startswith("""small""" ):
snake_case__ : str = 384
snake_case__ : Any = 1_536
snake_case__ : str = 12
snake_case__ : int = 6
if deit_name[9:].startswith("""base""" ):
pass
elif deit_name[4:].startswith("""large""" ):
snake_case__ : Union[str, Any] = 1_024
snake_case__ : Any = 4_096
snake_case__ : List[Any] = 24
snake_case__ : Tuple = 16
# load original model from timm
snake_case__ : List[Any] = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case__ : Optional[Any] = timm_model.state_dict()
snake_case__ : Optional[int] = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# load HuggingFace model
snake_case__ : Optional[Any] = DeiTForImageClassificationWithTeacher(_lowerCAmelCase ).eval()
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image, prepared by DeiTImageProcessor
snake_case__ : List[Any] = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
snake_case__ : Optional[Any] = DeiTImageProcessor(size=_lowerCAmelCase , crop_size=config.image_size )
snake_case__ : str = image_processor(images=prepare_img() , return_tensors="""pt""" )
snake_case__ : Optional[Any] = encoding["""pixel_values"""]
snake_case__ : Tuple = model(_lowerCAmelCase )
snake_case__ : Optional[int] = timm_model(_lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCAmelCase , outputs.logits , atol=1e-3 )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(f"Saving model {deit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCAmelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
__a = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 35
| 1
|
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase : Optional[Any] = '''▁'''
lowerCamelCase : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
_A : str = BertGenerationTokenizer
_A : int = False
_A : Dict = True
def lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
super().setUp()
__lowercase : str = BertGenerationTokenizer(__a , keep_accents=__a )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = """<s>"""
__lowercase : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(__a ) , 1002 )
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase : str = BertGenerationTokenizer(__a , keep_accents=__a )
__lowercase : Optional[int] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__a , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [285, 46, 10, 170, 382] , )
__lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__lowercase : Tuple = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__lowercase : List[str] = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
@slow
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
__lowercase : Any = """Hello World!"""
__lowercase : Tuple = [18536, 2260, 101]
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@slow
def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase : int = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
__lowercase : Any = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@require_torch
@slow
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
__lowercase : int = list(self.big_tokenizer.get_vocab().keys() )[:10]
__lowercase : Optional[Any] = """ """.join(__a )
__lowercase : Union[str, Any] = self.big_tokenizer.encode_plus(__a , return_tensors="""pt""" , return_token_type_ids=__a )
__lowercase : List[Any] = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=__a )
__lowercase : Optional[Any] = BertGenerationConfig()
__lowercase : Union[str, Any] = BertGenerationEncoder(__a )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__a )
model(**__a )
@slow
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Tuple = {"""input_ids""": [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
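# Illustrative sketch, not part of the original file: the assertions above rest
# on two SentencePiece conventions checked here in miniature. Word-initial
# pieces carry the "▁" marker, and out-of-vocabulary pieces fall back to
# <unk> (id 0). The 4-entry vocab below is hypothetical:
_sp_vocab = {"<unk>": 0, "▁I": 8, "▁was": 21, ".": 4}
def _sp_encode_example(pieces):
    return [_sp_vocab.get(p, _sp_vocab["<unk>"]) for p in pieces]
# _sp_encode_example(["▁I", "▁was", "é", "."]) -> [8, 21, 0, 4]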
| 360
|
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , __a : Dict , __a : List[str]=None , __a : Optional[Any]=None , __a : Union[str, Any]=None , __a : int="resnet50" , __a : List[str]=3 , __a : Tuple=32 , __a : Dict=3 , __a : List[str]=True , __a : Union[str, Any]=True , ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = parent
__lowercase : List[str] = out_indices if out_indices is not None else [4]
__lowercase : Optional[int] = stage_names
__lowercase : Any = out_features
__lowercase : Optional[Any] = backbone
__lowercase : Optional[Any] = batch_size
__lowercase : Union[str, Any] = image_size
__lowercase : List[str] = num_channels
__lowercase : str = use_pretrained_backbone
__lowercase : str = is_training
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : str = self.get_config()
return config, pixel_values
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def lowerCAmelCase ( self : Optional[int] , __a : Dict , __a : Any ) -> Dict:
"""simple docstring"""
__lowercase : Dict = TimmBackbone(config=__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : Optional[Any] = model(__a )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
__lowercase : Union[str, Any] = self.prepare_config_and_inputs()
__lowercase , __lowercase : str = config_and_inputs
__lowercase : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class lowerCAmelCase ( __a , __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : List[Any] = (TimmBackbone,) if is_torch_available() else ()
_A : Dict = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
_A : List[Any] = False
_A : List[str] = False
_A : Any = False
_A : Optional[Any] = False
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase : str = TimmBackboneModelTester(self )
__lowercase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
__lowercase : Tuple = """resnet18"""
__lowercase : Optional[int] = """microsoft/resnet-18"""
__lowercase : Union[str, Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a )
__lowercase : Dict = AutoBackbone.from_pretrained(__a )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
__lowercase : Union[str, Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a , out_indices=[1, 2, 3] )
__lowercase : Optional[Any] = AutoBackbone.from_pretrained(__a , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
def lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Optional[Any] = model_class(__a )
__lowercase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : List[str] = [*signature.parameters.keys()]
__lowercase : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def lowerCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : Optional[Any] = True
__lowercase : Union[str, Any] = self.has_attentions
# no need to test all models as different heads yield the same functionality
__lowercase : Union[str, Any] = self.all_model_classes[0]
__lowercase : List[Any] = model_class(__a )
model.to(__a )
__lowercase : Optional[Any] = self._prepare_for_class(__a , __a )
__lowercase : Union[str, Any] = model(**__a )
__lowercase : Optional[int] = outputs[0][-1]
# Encoder-/Decoder-only models
__lowercase : Any = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
__lowercase : Optional[int] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__a )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : List[str] = model_class(__a )
model.to(__a )
model.eval()
__lowercase : int = model(**__a )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
__lowercase : Any = copy.deepcopy(__a )
__lowercase : Dict = None
__lowercase : Tuple = model_class(__a )
model.to(__a )
model.eval()
__lowercase : Optional[int] = model(**__a )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
__lowercase : List[str] = copy.deepcopy(__a )
__lowercase : Optional[Any] = False
__lowercase : str = model_class(__a )
model.to(__a )
model.eval()
__lowercase : List[Any] = model(**__a )
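# Illustrative demo, not part of the original file: the training-gradients test
# above relies on retain_grad(), because non-leaf tensors only keep .grad when
# it is requested before backward():
import torch
_x = torch.ones(2, requires_grad=True)
_h = _x * 3          # non-leaf intermediate
_h.retain_grad()     # without this, _h.grad would stay None
_h.sum().backward()
assert _h.grad is not None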
| 306
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase : Union[str, Any] = {
"configuration_xmod": [
"XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XmodConfig",
"XmodOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any = [
"XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
"XmodForCausalLM",
"XmodForMaskedLM",
"XmodForMultipleChoice",
"XmodForQuestionAnswering",
"XmodForSequenceClassification",
"XmodForTokenClassification",
"XmodModel",
"XmodPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
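# Illustrative sketch, not part of the original file: the TYPE_CHECKING /
# _LazyModule split above gives static type checkers real imports while
# deferring the heavy runtime imports until first attribute access. The same
# idea in miniature with importlib:
import importlib
def _lazy_get_example(module_name, attr):
    # import the module only when the attribute is actually requested
    return getattr(importlib.import_module(module_name), attr)
# _lazy_get_example("math", "pi") -> 3.141592653589793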
| 156
|
"""simple docstring"""
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
return round(float(moles / volume ) * nfactor )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
return round(float((pressure * volume) / (0.0821 * moles) ) )
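# Illustrative worked example, not part of the original file: with
# R = 0.0821 L*atm/(mol*K), 2 mol of gas at 300 K in a 10 L vessel exerts
# P = nRT / V = (2 * 0.0821 * 300) / 10 = 4.926 atm, which rounds to 5:
assert round(float((2 * 0.0821 * 300) / 10)) == 5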
if __name__ == "__main__":
import doctest
doctest.testmod()
| 61
| 0
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
_lowerCamelCase : List[str] = logging.getLogger(__name__)
def _a ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = np.argmax(SCREAMING_SNAKE_CASE__ , axis=1 )
return np.sum(outputs == labels )
def _a ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE__ , encoding="utf_8" ) as f:
SCREAMING_SNAKE_CASE__ : Any = csv.reader(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = []
next(SCREAMING_SNAKE_CASE__ ) # skip the first line
for line in tqdm(SCREAMING_SNAKE_CASE__ ):
output.append((" ".join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = []
for dataset in encoded_datasets:
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.zeros((n_batch, 2, input_len) , dtype=np.int64 )
SCREAMING_SNAKE_CASE__ : List[Any] = np.zeros((n_batch, 2) , dtype=np.int64 )
SCREAMING_SNAKE_CASE__ : List[Any] = np.full((n_batch, 2, input_len) , fill_value=-1_00 , dtype=np.int64 )
SCREAMING_SNAKE_CASE__ : Tuple = np.zeros((n_batch,) , dtype=np.int64 )
for i, (story, conta, conta, mc_label) in enumerate(SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : Any = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
SCREAMING_SNAKE_CASE__ : List[str] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
SCREAMING_SNAKE_CASE__ : Optional[Any] = with_conta
SCREAMING_SNAKE_CASE__ : int = with_conta
SCREAMING_SNAKE_CASE__ : Dict = len(SCREAMING_SNAKE_CASE__ ) - 1
SCREAMING_SNAKE_CASE__ : int = len(SCREAMING_SNAKE_CASE__ ) - 1
SCREAMING_SNAKE_CASE__ : Optional[int] = with_conta
SCREAMING_SNAKE_CASE__ : Dict = with_conta
SCREAMING_SNAKE_CASE__ : int = mc_label
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(SCREAMING_SNAKE_CASE__ ) for t in all_inputs ) )
return tensor_datasets
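# Illustrative sketch, not part of the original file: each row built above is
# laid out as [start] story[:cap] [delim] continuation[:cap] [clf], and the
# mc_token_id points at the final [clf] position, where the double-heads model
# reads its multiple-choice logit. Standalone mirror with assumed special ids:
def _rocstories_layout_example(story, cont, start=0, delim=1, clf=2, cap=3):
    seq = [start] + story[:cap] + [delim] + cont[:cap] + [clf]
    return seq, len(seq) - 1  # (input ids, mc_token_id)
# _rocstories_layout_example([7, 8, 9, 9], [5]) -> ([0, 7, 8, 9, 1, 5, 2], 6)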
def _a ( ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=SCREAMING_SNAKE_CASE__ , default="openai-gpt" , help="pretrained model name" )
parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." )
parser.add_argument(
"--output_dir" , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="The output directory where the model predictions and checkpoints will be written." , )
parser.add_argument("--train_dataset" , type=SCREAMING_SNAKE_CASE__ , default="" )
parser.add_argument("--eval_dataset" , type=SCREAMING_SNAKE_CASE__ , default="" )
parser.add_argument("--seed" , type=SCREAMING_SNAKE_CASE__ , default=42 )
parser.add_argument("--num_train_epochs" , type=SCREAMING_SNAKE_CASE__ , default=3 )
parser.add_argument("--train_batch_size" , type=SCREAMING_SNAKE_CASE__ , default=8 )
parser.add_argument("--eval_batch_size" , type=SCREAMING_SNAKE_CASE__ , default=16 )
parser.add_argument("--adam_epsilon" , default=1E-8 , type=SCREAMING_SNAKE_CASE__ , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , type=SCREAMING_SNAKE_CASE__ , default=1 )
parser.add_argument(
"--max_steps" , default=-1 , type=SCREAMING_SNAKE_CASE__ , help=(
"If > 0: set total number of training steps to perform. Override num_train_epochs."
) , )
parser.add_argument(
"--gradient_accumulation_steps" , type=SCREAMING_SNAKE_CASE__ , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
parser.add_argument("--learning_rate" , type=SCREAMING_SNAKE_CASE__ , default=6.2_5E-5 )
parser.add_argument("--warmup_steps" , default=0 , type=SCREAMING_SNAKE_CASE__ , help="Linear warmup over warmup_steps." )
parser.add_argument("--lr_schedule" , type=SCREAMING_SNAKE_CASE__ , default="warmup_linear" )
parser.add_argument("--weight_decay" , type=SCREAMING_SNAKE_CASE__ , default=0.0_1 )
parser.add_argument("--lm_coef" , type=SCREAMING_SNAKE_CASE__ , default=0.9 )
parser.add_argument("--n_valid" , type=SCREAMING_SNAKE_CASE__ , default=3_74 )
parser.add_argument("--server_ip" , type=SCREAMING_SNAKE_CASE__ , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=SCREAMING_SNAKE_CASE__ , default="" , help="Can be used for distant debugging." )
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
print(SCREAMING_SNAKE_CASE__ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE__ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
SCREAMING_SNAKE_CASE__ : Any = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
SCREAMING_SNAKE_CASE__ : int = torch.cuda.device_count()
logger.info("device: {}, n_gpu {}".format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True." )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
SCREAMING_SNAKE_CASE__ : List[str] = ["_start_", "_delimiter_", "_classify_"]
SCREAMING_SNAKE_CASE__ : int = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE__ ) )
model.to(SCREAMING_SNAKE_CASE__ )
# Load and encode the datasets
def tokenize_and_encode(SCREAMING_SNAKE_CASE__ : int ):
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return obj
return [tokenize_and_encode(SCREAMING_SNAKE_CASE__ ) for o in obj]
logger.info("Encoding dataset..." )
SCREAMING_SNAKE_CASE__ : str = load_rocstories_dataset(args.train_dataset )
SCREAMING_SNAKE_CASE__ : Optional[Any] = load_rocstories_dataset(args.eval_dataset )
SCREAMING_SNAKE_CASE__ : Optional[int] = (train_dataset, eval_dataset)
SCREAMING_SNAKE_CASE__ : Any = tokenize_and_encode(SCREAMING_SNAKE_CASE__ )
# Compute the max input length for the Transformer
SCREAMING_SNAKE_CASE__ : Optional[Any] = model.config.n_positions // 2 - 2
SCREAMING_SNAKE_CASE__ : Dict = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
SCREAMING_SNAKE_CASE__ : Optional[Any] = min(SCREAMING_SNAKE_CASE__ , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
SCREAMING_SNAKE_CASE__ : Any = pre_process_datasets(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = tensor_datasets[0], tensor_datasets[1]
SCREAMING_SNAKE_CASE__ : Any = TensorDataset(*SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = RandomSampler(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = DataLoader(SCREAMING_SNAKE_CASE__ , sampler=SCREAMING_SNAKE_CASE__ , batch_size=args.train_batch_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = TensorDataset(*SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = SequentialSampler(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = DataLoader(SCREAMING_SNAKE_CASE__ , sampler=SCREAMING_SNAKE_CASE__ , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
SCREAMING_SNAKE_CASE__ : List[str] = args.max_steps
SCREAMING_SNAKE_CASE__ : Any = args.max_steps // (len(SCREAMING_SNAKE_CASE__ ) // args.gradient_accumulation_steps) + 1
else:
SCREAMING_SNAKE_CASE__ : Dict = len(SCREAMING_SNAKE_CASE__ ) // args.gradient_accumulation_steps * args.num_train_epochs
SCREAMING_SNAKE_CASE__ : str = list(model.named_parameters() )
SCREAMING_SNAKE_CASE__ : int = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
SCREAMING_SNAKE_CASE__ : str = [
{
"params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
]
SCREAMING_SNAKE_CASE__ : Optional[int] = AdamW(SCREAMING_SNAKE_CASE__ , lr=args.learning_rate , eps=args.adam_epsilon )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_linear_schedule_with_warmup(
SCREAMING_SNAKE_CASE__ , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE__ )
if args.do_train:
SCREAMING_SNAKE_CASE__ : Any = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ):
SCREAMING_SNAKE_CASE__ : Dict = 0
SCREAMING_SNAKE_CASE__ : List[Any] = 0
SCREAMING_SNAKE_CASE__ : str = tqdm(SCREAMING_SNAKE_CASE__ , desc="Training" )
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : List[str] = tuple(t.to(SCREAMING_SNAKE_CASE__ ) for t in batch )
SCREAMING_SNAKE_CASE__ : Any = batch
SCREAMING_SNAKE_CASE__ : Tuple = model(SCREAMING_SNAKE_CASE__ , mc_token_ids=SCREAMING_SNAKE_CASE__ , lm_labels=SCREAMING_SNAKE_CASE__ , mc_labels=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
SCREAMING_SNAKE_CASE__ : List[str] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
SCREAMING_SNAKE_CASE__ : int = "Training loss: {:.2e} lr: {:.2e}".format(SCREAMING_SNAKE_CASE__ , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
SCREAMING_SNAKE_CASE__ : Tuple = model.module if hasattr(SCREAMING_SNAKE_CASE__ , "module" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE__ )
torch.save(model_to_save.state_dict() , SCREAMING_SNAKE_CASE__ )
model_to_save.config.to_json_file(SCREAMING_SNAKE_CASE__ )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
SCREAMING_SNAKE_CASE__ : Optional[Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
SCREAMING_SNAKE_CASE__ : Optional[Any] = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(SCREAMING_SNAKE_CASE__ )
if args.do_eval:
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple = 0, 0
SCREAMING_SNAKE_CASE__ : List[Any] = 0, 0
for batch in tqdm(SCREAMING_SNAKE_CASE__ , desc="Evaluating" ):
SCREAMING_SNAKE_CASE__ : Optional[int] = tuple(t.to(SCREAMING_SNAKE_CASE__ ) for t in batch )
SCREAMING_SNAKE_CASE__ : Optional[Any] = batch
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = model(
SCREAMING_SNAKE_CASE__ , mc_token_ids=SCREAMING_SNAKE_CASE__ , lm_labels=SCREAMING_SNAKE_CASE__ , mc_labels=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = mc_logits.detach().cpu().numpy()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = mc_labels.to("cpu" ).numpy()
SCREAMING_SNAKE_CASE__ : Dict = accuracy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
SCREAMING_SNAKE_CASE__ : Dict = eval_loss / nb_eval_steps
SCREAMING_SNAKE_CASE__ : Optional[Any] = eval_accuracy / nb_eval_examples
SCREAMING_SNAKE_CASE__ : Any = tr_loss / nb_tr_steps if args.do_train else None
SCREAMING_SNAKE_CASE__ : str = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
SCREAMING_SNAKE_CASE__ : str = os.path.join(args.output_dir , "eval_results.txt" )
with open(SCREAMING_SNAKE_CASE__ , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , SCREAMING_SNAKE_CASE__ , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
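# Illustrative sketch, not part of the original file: the progress-bar loss in
# the training loop above is an exponential moving average with decay 0.7,
# keeping 70% of the running value and mixing in 30% of each new loss:
def _ema_example(losses, decay=0.7):
    ema = None
    for loss in losses:
        ema = loss if ema is None else decay * ema + (1 - decay) * loss
    return ema
# _ema_example([1.0, 0.0]) -> 0.7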
| 371
|
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
_lowerCamelCase : Any = f"https://www.google.com/search?q={query}&num=100"
_lowerCamelCase : Optional[Any] = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
_lowerCamelCase : Union[str, Any] = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
_lowerCamelCase : Optional[Any] = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
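# Illustrative check, not part of the original file: both query paths above
# end up URL-encoded, either by joining argv words with the literal "%20" or
# via urllib's quote(), which percent-encodes spaces the same way:
from urllib.parse import quote
assert quote("open source") == "open%20source"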
| 191
| 0
|
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ):
"""simple docstring"""
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
_UpperCAmelCase = TapasConfig.from_json_file(lowercase )
# set absolute/relative position embeddings parameter
_UpperCAmelCase = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
_UpperCAmelCase = TapasForQuestionAnswering(config=lowercase )
elif task == "WTQ":
# run_task_main.py hparams
_UpperCAmelCase = 4
_UpperCAmelCase = True
# hparam_utils.py hparams
_UpperCAmelCase = 0.66_46_94
_UpperCAmelCase = 0.20_79_51
_UpperCAmelCase = 0.12_11_94
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = 0.0_35_25_13
_UpperCAmelCase = TapasForQuestionAnswering(config=lowercase )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
_UpperCAmelCase = 4
_UpperCAmelCase = False
# hparam_utils.py hparams
_UpperCAmelCase = 36.45_19
_UpperCAmelCase = 0.90_34_21
_UpperCAmelCase = 2_22.0_88
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = 0.76_31_41
_UpperCAmelCase = TapasForQuestionAnswering(config=lowercase )
elif task == "TABFACT":
_UpperCAmelCase = TapasForSequenceClassification(config=lowercase )
elif task == "MLM":
_UpperCAmelCase = TapasForMaskedLM(config=lowercase )
elif task == "INTERMEDIATE_PRETRAINING":
_UpperCAmelCase = TapasModel(config=lowercase )
else:
raise ValueError(f'''Task {task} not supported.''' )
print(f'''Building PyTorch model from configuration: {config}''' )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(lowercase ,lowercase ,lowercase )
# Save pytorch-model (weights and configuration)
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(lowercase )
# Save tokenizer files
print(f'''Save tokenizer files to {pytorch_dump_path}''' )
_UpperCAmelCase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" ,model_max_length=5_12 )
tokenizer.save_pretrained(lowercase )
print("""Used relative position embeddings:""" ,model.config.reset_position_index_per_cell )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
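# Example invocation (a sketch; paths are placeholders). Note that the script
# derives the vocab file by stripping the trailing "model.ckpt" (10 characters)
# from --tf_checkpoint_path, so the checkpoint path must end exactly that way:
#
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/tapas_config.json \
#       --pytorch_dump_path /path/to/output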
| 289
|
"""simple docstring"""
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """Knuth-Morris-Pratt: return True if `pattern` occurs in `text`."""
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array(pattern: str) -> list[int]:
    """Build the KMP failure (longest proper prefix-suffix) array."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)
    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
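    # Extra worked example (added for illustration): failure[k] is the length
    # of the longest proper prefix of pattern[: k + 1] that is also its suffix;
    # it tells the matcher how far it may safely fall back after a mismatch.
    assert get_failure_array("ABABX") == [0, 0, 1, 2, 0]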
| 289
| 1
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def __lowerCamelCase ( ):
"""simple docstring"""
a :Tuple = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
a :Union[str, Any] = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw ).convert('''RGB''' )
return image
def __lowerCamelCase ( UpperCAmelCase_ : str ):
"""simple docstring"""
a :str = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def __lowerCamelCase ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any] ):
"""simple docstring"""
a :Union[str, Any] = dct.pop(UpperCAmelCase_ )
a :Optional[Any] = val
def __lowerCamelCase ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict ):
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
a :Any = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
a :List[Any] = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
a :str = torch.cat((q_bias, torch.zeros_like(UpperCAmelCase_ , requires_grad=UpperCAmelCase_ ), v_bias) )
a :Any = qkv_bias
def __lowerCamelCase ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] ):
"""simple docstring"""
a :str = 364 if '''coco''' in model_name else 224
a :Tuple = BlipaVisionConfig(image_size=UpperCAmelCase_ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
a :int = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=UpperCAmelCase_ ).to_dict()
elif "opt-6.7b" in model_name:
a :Any = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=UpperCAmelCase_ ).to_dict()
elif "t5-xl" in model_name:
a :Any = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
a :str = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
a :Union[str, Any] = BlipaConfig(vision_config=UpperCAmelCase_ , text_config=UpperCAmelCase_ )
return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
"""simple docstring"""
a :int = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
a :Union[str, Any] = tokenizer('''\n''' , add_special_tokens=UpperCAmelCase_ ).input_ids[0]
a , a :Any = get_blipa_config(UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ )
a :Optional[Any] = BlipaForConditionalGeneration(UpperCAmelCase_ ).eval()
a :List[str] = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
a , a :str = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
a :Optional[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
a , a , a :Tuple = load_model_and_preprocess(
name=UpperCAmelCase_ , model_type=UpperCAmelCase_ , is_eval=UpperCAmelCase_ , device=UpperCAmelCase_ )
original_model.eval()
print('''Done!''' )
# update state dict keys
a :Optional[int] = original_model.state_dict()
a :int = create_rename_keys(UpperCAmelCase_ )
for src, dest in rename_keys:
rename_key(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
a :Dict = state_dict.pop(UpperCAmelCase_ )
if key.startswith('''Qformer.bert''' ):
a :Optional[Any] = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
a :Optional[Any] = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
a :Optional[int] = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
a :Dict = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
a :Tuple = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
a :Any = key.replace('''t5''' , '''language''' )
a :Optional[int] = val
# read in qv biases
read_in_q_v_bias(UpperCAmelCase_ , UpperCAmelCase_ )
a , a :Any = hf_model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
assert len(UpperCAmelCase_ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
a :Dict = load_demo_image()
a :int = vis_processors['''eval'''](UpperCAmelCase_ ).unsqueeze(0 ).to(UpperCAmelCase_ )
a :Optional[Any] = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(UpperCAmelCase_ )
# create processor
a :str = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=UpperCAmelCase_ , image_std=UpperCAmelCase_ )
a :str = BlipaProcessor(image_processor=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ )
a :Optional[int] = processor(images=UpperCAmelCase_ , return_tensors='''pt''' ).pixel_values.to(UpperCAmelCase_ )
# make sure processor creates exact same pixel values
assert torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ )
original_model.to(UpperCAmelCase_ )
hf_model.to(UpperCAmelCase_ )
with torch.no_grad():
if "opt" in model_name:
a :Optional[int] = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
a :Optional[int] = hf_model(UpperCAmelCase_ , UpperCAmelCase_ ).logits
else:
a :int = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
a :int = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
a :Union[str, Any] = hf_model(UpperCAmelCase_ , UpperCAmelCase_ , labels=UpperCAmelCase_ ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
a :str = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=UpperCAmelCase_ )
assert torch.allclose(logits[0, :3, :3] , UpperCAmelCase_ , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
a :List[str] = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=UpperCAmelCase_ )
else:
# cast to same type
a :Optional[Any] = logits.dtype
assert torch.allclose(original_logits.to(UpperCAmelCase_ ) , UpperCAmelCase_ , atol=1E-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
a :str = ''''''
a :str = tokenizer(UpperCAmelCase_ , return_tensors='''pt''' ).input_ids.to(UpperCAmelCase_ )
a :Optional[Any] = original_model.generate({'''image''': original_pixel_values} )
a :Union[str, Any] = hf_model.generate(
UpperCAmelCase_ , UpperCAmelCase_ , do_sample=UpperCAmelCase_ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , UpperCAmelCase_ )
a :str = input_ids.shape[1]
a :Union[str, Any] = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=UpperCAmelCase_ )
a :Tuple = [text.strip() for text in output_text]
print('''HF generation:''' , UpperCAmelCase_ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(UpperCAmelCase_ )
hf_model.save_pretrained(UpperCAmelCase_ )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
snake_case : List[str] = argparse.ArgumentParser()
snake_case : Optional[Any] = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
snake_case : Optional[int] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
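# Example invocation (a sketch; the script filename and output path are assumed):
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b-converted \
#       --push_to_hub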
| 281
|
from ...configuration_utils import PretrainedConfig
class _snake_case ( PretrainedConfig ):
SCREAMING_SNAKE_CASE__ = 'bert-generation'
def __init__( self , _lowerCamelCase=5_0358 , _lowerCamelCase=1024 , _lowerCamelCase=24 , _lowerCamelCase=16 , _lowerCamelCase=4096 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-12 , _lowerCamelCase=0 , _lowerCamelCase=2 , _lowerCamelCase=1 , _lowerCamelCase="absolute" , _lowerCamelCase=True , **_lowerCamelCase , ):
super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase )
a :Optional[int] = vocab_size
a :Tuple = hidden_size
a :Any = num_hidden_layers
a :Any = num_attention_heads
a :List[Any] = hidden_act
a :Tuple = intermediate_size
a :Any = hidden_dropout_prob
a :int = attention_probs_dropout_prob
a :Dict = max_position_embeddings
a :int = initializer_range
a :Union[str, Any] = layer_norm_eps
a :str = position_embedding_type
a :int = use_cache
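# Reference usage (hedged): the upstream counterpart of the class above is
# transformers.BertGenerationConfig, which accepts the same hyperparameters:
#
#     from transformers import BertGenerationConfig
#     config = BertGenerationConfig(vocab_size=50358, hidden_size=1024,
#                                   num_hidden_layers=24, num_attention_heads=16)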
| 281
| 1
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
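# Usage sketch (hedged; this mirrors the public diffusers Kandinsky API that
# these pipeline modules implement -- the checkpoint names are the community
# ones published on the Hub):
#
#     from diffusers import KandinskyPriorPipeline, KandinskyPipeline
#     prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
#     image_embeds, negative_image_embeds = prior("a red cat").to_tuple()
#     pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
#     image = pipe("a red cat", image_embeds=image_embeds,
#                  negative_image_embeds=negative_image_embeds).images[0]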
| 262
|
class UpperCamelCase_ :
'''simple docstring'''
    def __init__( self , set_counts ) -> None:
        self.set_counts = set_counts
        self.max_set = max(set_counts )
        num_sets = len(set_counts )
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets ) )
    def merge( self , src , dst ) -> bool:
        src_parent = self.get_parent(src )
        dst_parent = self.get_parent(dst )
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set , joined_set_size )
        return True
    def get_parent( self , disj_set ) -> int:
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set] )
        return self.parents[disj_set]
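if __name__ == "__main__":
    # Usage sketch (added for illustration; `merge` and `get_parent` are the
    # reconstructed method names used within the class): four singleton sets
    # merged pairwise and then together, so max_set ends up covering all four.
    disjoint_set = UpperCamelCase_([1, 1, 1, 1])
    disjoint_set.merge(0, 1)
    disjoint_set.merge(2, 3)
    disjoint_set.merge(0, 2)
    assert disjoint_set.max_set == 4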
| 178
| 0
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class __lowerCamelCase ( unittest.TestCase , ToolTesterMixin ):
'''simple docstring'''
def lowerCamelCase ( self : Optional[int] ):
lowerCAmelCase_ : int = load_tool("text-classification" )
self.tool.setup()
lowerCAmelCase_ : int = load_tool("text-classification" , remote=__A )
def lowerCamelCase ( self : Optional[Any] ):
lowerCAmelCase_ : int = self.tool("That\'s quite cool" , ["positive", "negative"] )
self.assertEqual(__A , "positive" )
def lowerCamelCase ( self : int ):
lowerCAmelCase_ : Optional[Any] = self.remote_tool("That\'s quite cool" , ["positive", "negative"] )
self.assertEqual(__A , "positive" )
def lowerCamelCase ( self : Optional[Any] ):
lowerCAmelCase_ : Tuple = self.tool(text="That\'s quite cool" , labels=["positive", "negative"] )
self.assertEqual(__A , "positive" )
def lowerCamelCase ( self : Any ):
lowerCAmelCase_ : Dict = self.remote_tool(text="That\'s quite cool" , labels=["positive", "negative"] )
self.assertEqual(__A , "positive" )
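# Usage sketch outside the test harness (hedged; it mirrors the calls the
# tests above exercise, assuming the transformers agents extra is installed):
#
#     from transformers import load_tool
#     tool = load_tool("text-classification")
#     tool("That's quite cool", ["positive", "negative"])  # -> "positive"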
| 359
|
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowercase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch( prophetnet_checkpoint_path , pytorch_dump_folder_path ):
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
lowerCAmelCase_ : List[str] = XLMProphetNetForConditionalGenerationOld.from_pretrained(__UpperCamelCase )
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = XLMProphetNetForConditionalGeneration.from_pretrained(
__UpperCamelCase , output_loading_info=__UpperCamelCase )
else:
lowerCAmelCase_ : List[str] = ProphetNetForConditionalGenerationOld.from_pretrained(__UpperCamelCase )
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = ProphetNetForConditionalGeneration.from_pretrained(
__UpperCamelCase , output_loading_info=__UpperCamelCase )
lowerCAmelCase_ : List[str] = ["key_proj", "value_proj", "query_proj"]
lowerCAmelCase_ : Tuple = {
"self_attn": "ngram_self_attn",
"cross_attn": "encoder_attn",
"cross_attn_layer_norm": "encoder_attn_layer_norm",
"feed_forward_layer_norm": "final_layer_norm",
"feed_forward": "",
"intermediate": "fc1",
"output": "fc2",
"key_proj": "k_proj",
"query_proj": "q_proj",
"value_proj": "v_proj",
"word_embeddings": "embed_tokens",
"embeddings_layer_norm": "emb_layer_norm",
"relative_pos_embeddings": "relative_linear",
"ngram_embeddings": "ngram_input_embed",
"position_embeddings": "embed_positions",
}
for key in loading_info["missing_keys"]:
lowerCAmelCase_ : Dict = key.split("." )
if attributes[0] == "lm_head":
lowerCAmelCase_ : int = prophet
lowerCAmelCase_ : int = prophet_old
else:
lowerCAmelCase_ : str = prophet.prophetnet
lowerCAmelCase_ : int = prophet_old.model
lowerCAmelCase_ : Optional[int] = False
for attribute in attributes:
if attribute in mapping:
lowerCAmelCase_ : Tuple = mapping[attribute]
if not hasattr(__UpperCamelCase , __UpperCamelCase ) and len(__UpperCamelCase ) > 0:
lowerCAmelCase_ : Optional[Any] = attribute
elif hasattr(__UpperCamelCase , __UpperCamelCase ):
lowerCAmelCase_ : Optional[Any] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
lowerCAmelCase_ : str = old_model.weight
logger.info(f'''{attribute} is initialized.''' )
lowerCAmelCase_ : Union[str, Any] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
lowerCAmelCase_ : Tuple = old_model.bias
logger.info(f'''{attribute} is initialized''' )
lowerCAmelCase_ : Optional[int] = True
break
elif attribute in special_keys and hasattr(__UpperCamelCase , "in_proj_weight" ):
lowerCAmelCase_ : List[Any] = old_model.in_proj_weight.shape[0] // 3
lowerCAmelCase_ : List[str] = getattr(__UpperCamelCase , __UpperCamelCase )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
lowerCAmelCase_ : List[str] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
lowerCAmelCase_ : List[Any] = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
lowerCAmelCase_ : Union[str, Any] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
lowerCAmelCase_ : List[Any] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
lowerCAmelCase_ : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
lowerCAmelCase_ : str = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
lowerCAmelCase_ : List[str] = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
lowerCAmelCase_ : Any = nn.Parameter(old_model.embed_positions.weight[:512, :] )
lowerCAmelCase_ : int = True
break
if attribute.isdigit():
lowerCAmelCase_ : Tuple = model[int(__UpperCamelCase )]
lowerCAmelCase_ : Tuple = old_model[int(__UpperCamelCase )]
else:
lowerCAmelCase_ : Optional[int] = getattr(__UpperCamelCase , __UpperCamelCase )
if old_attribute == "":
lowerCAmelCase_ : Tuple = old_model
else:
if not hasattr(__UpperCamelCase , __UpperCamelCase ):
raise ValueError(f'''{old_model} does not have {old_attribute}''' )
lowerCAmelCase_ : List[Any] = getattr(__UpperCamelCase , __UpperCamelCase )
if not is_key_init:
raise ValueError(f'''{key} was not correctly initialized!''' )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowercase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
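# Example invocation (a sketch; the script filename and paths are placeholders):
#
#   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#       --prophetnet_checkpoint_path /path/to/prophetnet_old_checkpoint \
#       --pytorch_dump_folder_path /path/to/output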
| 161
| 0
|
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def _a ( *a :Union[str, Any] ) -> List[str]:
if not isinstance(snake_case__ , snake_case__ ):
a = list(snake_case__ )
for i in range(len(snake_case__ ) ):
a = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def should_reduce_batch_size( exception: Exception ) -> bool:
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception , RuntimeError ) and len(exception.args ) == 1:
        return any(err in exception.args[0] for err in _statements )
return False
def _a ( a :Dict = None , a :int = 128 ) -> Optional[Any]:
if function is None:
return functools.partial(snake_case__ , starting_batch_size=snake_case__ )
a = starting_batch_size
def decorator(*a :Tuple , **a :Union[str, Any] ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
a = list(inspect.signature(snake_case__ ).parameters.keys() )
# Guard against user error
if len(snake_case__ ) < (len(snake_case__ ) + 1):
a = ''', '''.join([F"""{arg}={value}""" for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
F"""Batch size was passed into `{function.__name__}` as the first argument when called."""
F"""Remove this as the decorator already does so: `{function.__name__}({arg_str})`""" )
while True:
if batch_size == 0:
raise RuntimeError('''No executable batch size found, reached zero.''' )
try:
return function(snake_case__ , *snake_case__ , **snake_case__ )
except Exception as e:
if should_reduce_batch_size(snake_case__ ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
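# Usage sketch (hedged): in Accelerate the public entry point for the decorator
# defined last above is `accelerate.utils.find_executable_batch_size`; the
# wrapped function must take the batch size as its first argument, and it is
# retried with a halved batch size whenever a recognizable OOM error is raised:
#
#     from accelerate.utils import find_executable_batch_size
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def training_loop(batch_size):
#         ...  # runs again with batch_size halved after every caught OOM
#
#     training_loop()  # batch_size is injected by the decorator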
| 0
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class __UpperCAmelCase (PreTrainedTokenizerFast ):
__snake_case : Tuple = VOCAB_FILES_NAMES
__snake_case : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Optional[Any] = ["input_ids", "attention_mask"]
__snake_case : Optional[int] = None
def __init__( self: Dict , UpperCAmelCase_: Union[str, Any]=None , UpperCAmelCase_: str=None , UpperCAmelCase_: Optional[int]=None , UpperCAmelCase_: int="<unk>" , UpperCAmelCase_: List[str]="<s>" , UpperCAmelCase_: Tuple="</s>" , UpperCAmelCase_: List[Any]="<pad>" , UpperCAmelCase_: Dict=False , UpperCAmelCase_: Dict=False , **UpperCAmelCase_: Dict , ):
'''simple docstring'''
super().__init__(
UpperCAmelCase_ , UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ , **UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , UpperCAmelCase_ ) != add_prefix_space:
_SCREAMING_SNAKE_CASE = getattr(UpperCAmelCase_ , pre_tok_state.pop("""type""" ) )
_SCREAMING_SNAKE_CASE = add_prefix_space
_SCREAMING_SNAKE_CASE = pre_tok_class(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = add_prefix_space
def UpperCamelCase ( self: List[str] , *UpperCAmelCase_: Any , **UpperCAmelCase_: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = kwargs.get("""is_split_into_words""" , UpperCAmelCase_ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
""" pretokenized inputs.""" )
return super()._batch_encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Union[str, Any] , *UpperCAmelCase_: Dict , **UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = kwargs.get("""is_split_into_words""" , UpperCAmelCase_ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
""" pretokenized inputs.""" )
return super()._encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] = None ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_ )
return tuple(UpperCAmelCase_ )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: "Conversation" ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) + [self.eos_token_id] )
if len(UpperCAmelCase_ ) > self.model_max_length:
_SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
return input_ids
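# Minimal usage sketch (hedged): the upstream equivalent of the class above is
# transformers.BloomTokenizerFast; a typical round trip looks like this (BLOOM
# adds no special tokens by default, so decode usually restores the input):
#
#     from transformers import BloomTokenizerFast
#     tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#     ids = tok("Hello world").input_ids
#     text = tok.decode(ids)  # "Hello world"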
| 306
| 0
|
"""simple docstring"""
def logical_left_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )
    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
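    # Extra sanity checks (added for illustration) against Python's built-in
    # shift operators, which these string-based helpers mirror:
    assert logical_left_shift(0b1101, 2) == bin(0b1101 << 2)  # '0b110100'
    assert logical_right_shift(0b1101, 2) == bin(0b1101 >> 2)  # '0b11'
    assert arithmetic_right_shift(-16, 3) == "0b111110"  # -2 in 6-bit two's complement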
| 365
|
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
A : Union[str, Any] = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
@classmethod
def __A ( cls : Any ) -> Dict:
SCREAMING_SNAKE_CASE_ = TOKEN
HfFolder.save_token(__magic_name__ )
@classmethod
def __A ( cls : Optional[int] ) -> Tuple:
try:
delete_repo(token=cls._token , repo_id="test-model-flax" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-model-flax-org" )
except HTTPError:
pass
def __A ( self : str ) -> str:
SCREAMING_SNAKE_CASE_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
SCREAMING_SNAKE_CASE_ = FlaxBertModel(__magic_name__ )
model.push_to_hub("test-model-flax" , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__magic_name__ , 1e-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id="test-model-flax" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__magic_name__ , repo_id="test-model-flax" , push_to_hub=__magic_name__ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__magic_name__ , 1e-3 , msg=F'''{key} not identical''' )
def __A ( self : int ) -> Tuple:
SCREAMING_SNAKE_CASE_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
SCREAMING_SNAKE_CASE_ = FlaxBertModel(__magic_name__ )
model.push_to_hub("valid_org/test-model-flax-org" , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__magic_name__ , 1e-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-model-flax-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
__magic_name__ , repo_id="valid_org/test-model-flax-org" , push_to_hub=__magic_name__ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__magic_name__ , 1e-3 , msg=F'''{key} not identical''' )
def check_models_equal( modela , modelb ):
    models_are_equal = True
    flat_params_a = flatten_dict(modela.params )
    flat_params_b = flatten_dict(modelb.params )
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key] ) ) > 1E-4:
            models_are_equal = False
return models_are_equal
@require_flax
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : str ) -> Dict:
SCREAMING_SNAKE_CASE_ = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
SCREAMING_SNAKE_CASE_ = FlaxBertModel(__magic_name__ )
SCREAMING_SNAKE_CASE_ = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__magic_name__ , __magic_name__ ) )
with self.assertRaises(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ )
self.assertTrue(check_models_equal(__magic_name__ , __magic_name__ ) )
def __A ( self : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE_ = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
SCREAMING_SNAKE_CASE_ = FlaxBertModel(__magic_name__ )
SCREAMING_SNAKE_CASE_ = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__magic_name__ , __magic_name__ ) , max_shard_size="10KB" )
with self.assertRaises(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ )
self.assertTrue(check_models_equal(__magic_name__ , __magic_name__ ) )
def __A ( self : Optional[int] ) -> Dict:
SCREAMING_SNAKE_CASE_ = "bert"
SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-bert-subfolder"
with self.assertRaises(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def __A ( self : List[str] ) -> Dict:
SCREAMING_SNAKE_CASE_ = "bert"
SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
with self.assertRaises(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ )
self.assertIsNotNone(__magic_name__ )
| 305
| 0
|
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=2 , lowerCAmelCase_=24 , lowerCAmelCase_=16 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=10 , lowerCAmelCase_=0.02 , lowerCAmelCase_=None , lowerCAmelCase_=2 , lowerCAmelCase_=2 , ):
"""simple docstring"""
_snake_case = parent
_snake_case = batch_size
_snake_case = patch_size
_snake_case = max_length
_snake_case = num_mel_bins
_snake_case = is_training
_snake_case = use_labels
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = scope
_snake_case = frequency_stride
_snake_case = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_snake_case = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
_snake_case = (self.max_length - self.patch_size) // self.time_stride + 1
_snake_case = frequency_out_dimension * time_out_dimension
_snake_case = num_patches + 2
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = self.get_config()
return config, input_values, labels
def lowerCamelCase ( self ):
"""simple docstring"""
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = ASTModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
_snake_case = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_values , labels = config_and_inputs
        inputs_dict = {'''input_values''': input_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__lowercase = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
__lowercase = (
{'''audio-classification''': ASTForAudioClassification, '''feature-extraction''': ASTModel}
if is_torch_available()
else {}
)
__lowercase = False
__lowercase = False
__lowercase = False
__lowercase = False
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = ASTModelTester(self )
_snake_case = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def lowerCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='AST does not use inputs_embeds' )
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ , nn.Linear ) )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(SCREAMING_SNAKE_CASE__ )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['''input_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = ASTModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def prepare_audio():
    filepath = hf_hub_download(
        repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' )
    audio , sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase ( self ):
"""simple docstring"""
return (
ASTFeatureExtractor.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' )
if is_torchaudio_available()
else None
)
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.default_feature_extractor
_snake_case = ASTForAudioClassification.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' ).to(SCREAMING_SNAKE_CASE__ )
_snake_case = self.default_feature_extractor
        audio , sampling_rate = prepare_audio()
_snake_case = audio.squeeze().numpy()
_snake_case = feature_extractor(SCREAMING_SNAKE_CASE__ , sampling_rate=SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
_snake_case = model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
_snake_case = torch.Size((1, 5_27) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
_snake_case = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
| 42
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class _SCREAMING_SNAKE_CASE( PretrainedConfig ):
SCREAMING_SNAKE_CASE_ : str = '''beit'''
def __init__( self ,SCREAMING_SNAKE_CASE__=81_92 ,SCREAMING_SNAKE_CASE__=7_68 ,SCREAMING_SNAKE_CASE__=12 ,SCREAMING_SNAKE_CASE__=12 ,SCREAMING_SNAKE_CASE__=30_72 ,SCREAMING_SNAKE_CASE__="gelu" ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.0_2 ,SCREAMING_SNAKE_CASE__=1E-12 ,SCREAMING_SNAKE_CASE__=2_24 ,SCREAMING_SNAKE_CASE__=16 ,SCREAMING_SNAKE_CASE__=3 ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=[3, 5, 7, 11] ,SCREAMING_SNAKE_CASE__=[1, 2, 3, 6] ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=0.4 ,SCREAMING_SNAKE_CASE__=2_56 ,SCREAMING_SNAKE_CASE__=1 ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=2_55 ,**SCREAMING_SNAKE_CASE__ ,) -> Optional[Any]:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Any = vocab_size
__SCREAMING_SNAKE_CASE :Dict = hidden_size
__SCREAMING_SNAKE_CASE :Union[str, Any] = num_hidden_layers
__SCREAMING_SNAKE_CASE :List[str] = num_attention_heads
__SCREAMING_SNAKE_CASE :Optional[Any] = intermediate_size
__SCREAMING_SNAKE_CASE :Optional[int] = hidden_act
__SCREAMING_SNAKE_CASE :Tuple = hidden_dropout_prob
__SCREAMING_SNAKE_CASE :Optional[Any] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE :Optional[Any] = initializer_range
__SCREAMING_SNAKE_CASE :str = layer_norm_eps
__SCREAMING_SNAKE_CASE :int = image_size
__SCREAMING_SNAKE_CASE :Tuple = patch_size
__SCREAMING_SNAKE_CASE :Any = num_channels
__SCREAMING_SNAKE_CASE :Any = use_mask_token
__SCREAMING_SNAKE_CASE :Union[str, Any] = use_absolute_position_embeddings
__SCREAMING_SNAKE_CASE :Union[str, Any] = use_relative_position_bias
__SCREAMING_SNAKE_CASE :Union[str, Any] = use_shared_relative_position_bias
__SCREAMING_SNAKE_CASE :List[str] = layer_scale_init_value
__SCREAMING_SNAKE_CASE :Optional[Any] = drop_path_rate
__SCREAMING_SNAKE_CASE :str = use_mean_pooling
# decode head attributes (semantic segmentation)
__SCREAMING_SNAKE_CASE :Dict = out_indices
__SCREAMING_SNAKE_CASE :Optional[int] = pool_scales
# auxiliary head attributes (semantic segmentation)
__SCREAMING_SNAKE_CASE :Optional[int] = use_auxiliary_head
__SCREAMING_SNAKE_CASE :Union[str, Any] = auxiliary_loss_weight
__SCREAMING_SNAKE_CASE :Dict = auxiliary_channels
__SCREAMING_SNAKE_CASE :Optional[int] = auxiliary_num_convs
__SCREAMING_SNAKE_CASE :List[str] = auxiliary_concat_input
__SCREAMING_SNAKE_CASE :List[Any] = semantic_loss_ignore_index
class BeitOnnxConfig( OnnxConfig ):
SCREAMING_SNAKE_CASE_ : List[Any] = version.parse('''1.11''' )
@property
def _UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _UpperCamelCase ( self ) -> float:
"""simple docstring"""
return 1E-4
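# Export sketch (hedged): with the upstream BeitConfig/BeitOnnxConfig pair that
# this mirrors, ONNX export goes through the transformers.onnx CLI, e.g.
#
#   python -m transformers.onnx --model=microsoft/beit-base-patch16-224 onnx/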
| 191
| 0
|
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2
def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2
def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2
def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)
def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius
def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width
def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2
def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2
def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3))
    return area
def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height
def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height
def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2
def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y
def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides")
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as \
length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(f'''Rectangle: {area_rectangle(10, 20) = }''')
print(f'''Square: {area_square(10) = }''')
print(f'''Triangle: {area_triangle(10, 10) = }''')
print(f'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(f'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(f'''Rhombus: {area_rhombus(10, 20) = }''')
print(f'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(f'''Circle: {area_circle(20) = }''')
print(f'''Ellipse: {area_ellipse(10, 20) = }''')
print('\nSurface Areas of various geometric shapes: \n')
print(f'''Cube: {surface_area_cube(20) = }''')
print(f'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(f'''Sphere: {surface_area_sphere(20) = }''')
print(f'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(f'''Cone: {surface_area_cone(10, 20) = }''')
print(f'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(f'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(f'''Torus: {surface_area_torus(20, 10) = }''')
print(f'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(f'''Square: {area_reg_polygon(4, 10) = }''')
print(f'''Reqular Pentagon: {area_reg_polygon(5, 10) = }''')
| 365
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase = '▁'
_UpperCAmelCase = {'vocab_file': 'spiece.model'}
_UpperCAmelCase = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}
_UpperCAmelCase = {
'google/pegasus-xsum': 512,
}
_UpperCAmelCase = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,  # entries 2-104 are reserved for pretraining tokens
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        # the SentencePiece processor cannot be pickled directly
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset
    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def num_special_tokens_to_add(self, pair=False) -> int:
        """Just EOS."""
        return 1
    def _special_token_mask(self, seq: List[int]) -> List[int]:
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list where entries are 1 if the token is a special token, else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
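# A minimal usage sketch, not part of the original module (assumptions: the
# module is run from within its package, since the relative imports above
# require it, and a trained SentencePiece file "spiece.model" exists locally;
# both the path and the sample sentence are hypothetical).
if __name__ == "__main__":
    tokenizer = PegasusTokenizer(vocab_file="spiece.model")
    ids = tokenizer("The quick brown fox.")["input_ids"]
    # ids are SentencePiece ids shifted by `offset`, ending in eos_token_id (1)
    print(ids)
    print(tokenizer.decode(ids, skip_special_tokens=True))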