from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
# WGS84 ellipsoid constants, in metres
# https://en.wikipedia.org/wiki/World_Geodetic_System
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(
    lat1: float, lon1: float, lat2: float, lon2: float
) -> float:
    """
    Calculate the shortest distance along the surface of an ellipsoid between
    two points on the surface of earth, given their longitudes and latitudes:
    https://en.wikipedia.org/wiki/Geographical_distance#Lambert's_formula_for_long_lines
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between the two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2(P)cos^2(Q) / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2(P)sin^2(Q) / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
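    # Illustrative call, a sketch not present in the original module; the
    # coordinates below are assumed values for San Francisco and New York.
    # print(lamberts_ellipsoidal_distance(37.774856, -122.424227, 40.713019, -74.012647))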
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single line, padding/truncating it to max_length."""
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
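# Illustrative behaviour of trim_batch (a sketch, not part of the original
# module): with pad_token_id=0, trailing all-pad columns are dropped.
# >>> ids = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
# >>> trim_batch(ids, pad_token_id=0)
# tensor([[5, 6],
#         [7, 0]])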
class Seq2SeqDataset(Dataset):
    """A dataset that reads line-separated source/target files lazily via linecache."""

    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
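# Minimal wiring sketch (assumed file layout ./data/train.source and
# ./data/train.target; not part of the original module):
# from torch.utils.data import DataLoader
# dataset = Seq2SeqDataset(tokenizer, "./data", max_source_length=1024, max_target_length=56)
# loader = DataLoader(dataset, batch_size=8, collate_fn=dataset.collate_fn)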
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
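# Worked example (a sketch, not an original doctest): with one shared token
# out of two on each side, precision = recall = 0.5, so F1 = 0.5.
# >>> f1_score("hello world", "hello there")
# 0.5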
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
"""XGLM model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}


class XGLMConfig(PretrainedConfig):
    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=256008,
        max_position_embeddings=2048,
        d_model=1024,
        ffn_dim=4096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
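# Illustrative usage (a sketch, not part of the original file): the
# attribute_map lets generic names resolve to XGLM-specific ones.
# >>> config = XGLMConfig()
# >>> config.num_hidden_layers   # resolved to config.num_layers
# 24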
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # dc.token_ids is a list of integers and may only be initialized from integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of one another.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
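# Illustrative note (not part of the original file): with the default strides
# (5, 2, 2, 2, 2, 2, 2), inputs_to_logits_ratio is 5 * 2**6 = 320, i.e. the
# feature encoder downsamples the raw waveform by a factor of 320.
# >>> UniSpeechConfig().inputs_to_logits_ratio
# 320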
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    """Solve a strictly diagonally dominant linear system iteratively."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)

    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for the given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raise ValueError if the coefficient part of `table` is not strictly
    diagonally dominant."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(rows):
        total = 0
        for j in range(cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
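    # Illustrative run (a sketch, not one of the original test cases): solve
    # 4x + y = 2, x + 5y = -6, which is strictly diagonally dominant.
    coefficient = np.array([[4.0, 1.0], [1.0, 5.0]])
    constant = np.array([[2.0], [-6.0]])
    print(jacobi_iteration_method(coefficient, constant, [0.0, 0.0], 50))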
"""
Project Euler Problem 135: https://projecteuler.net/problem=135

Given positive integers x, y, and z in arithmetic progression, how many
values of n less than one million admit exactly ten distinct solutions to
x^2 - y^2 - z^2 = n?
"""


def solution(limit: int = 1000000) -> int:
    """Return the count of n below `limit` with exactly ten distinct solutions."""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
    print(f"{solution() = }")
"""
Prefix function, as used in the Knuth-Morris-Pratt string matching algorithm:
https://cp-algorithms.com/string/prefix-function.html
"""


def prefix_function(input_string: str) -> list:
    """For each prefix of input_string, return the length of the longest
    proper prefix that is also a suffix of that prefix."""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    """Return the largest value of the prefix function for input_str."""
    return max(prefix_function(input_str))
if __name__ == "__main__":
import doctest
doctest.testmod()
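    # Illustrative check (a sketch, not an original doctest): for "aabcdaabc"
    # the last entry is 4, since "aabc" is both a proper prefix and a suffix.
    print(prefix_function("aabcdaabc"))  # [0, 1, 0, 0, 0, 1, 2, 3, 4]
    print(longest_prefix("aabcdaabc"))  # 4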
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            image=init_image,
            strength=0.85,
            generator=generator,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
"""
Project Euler Problem 188: https://projecteuler.net/problem=188

The hyperexponentiation of a number: find the last 8 digits of 1777↑↑1855.
"""


def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Compute base**exponent % modulo_value by repeated squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last `digits` digits of the hyperexponentiation base↑↑height."""
    # calculate base ↑↑ height, i.e. the tower base**base**...**base, modulo 10**digits
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
    print(f"{solution() = }")
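    # Illustrative check of the helper (a sketch, not an original doctest):
    print(_modexpt(3, 4, 100))  # 81, matches pow(3, 4, 100)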
"""
A binary search tree
"""
from collections.abc import Iterable
from typing import Any


class Node:
    def __init__(self, value: int | None = None):
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)
class BinarySearchTree:
    def __init__(self, root: Node | None = None):
        self.root = root

    def __str__(self) -> str:
        """
        Return a string of all the Nodes using in order traversal
        """
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        """
        Insert a new node in Binary Search Tree with value label
        """
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        """
        We go deep on the right branch
        """
        if node is None:
            if self.root is None:
                return None
            node = self.root

        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        """
        We go deep on the left branch
        """
        if node is None:
            node = self.root
        if self.root is None:
            return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value: int) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        """
        Traverse the tree, with preorder traversal as the default; client code
        can pass any traversal function instead.
        """
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None) -> None:
        """Perform an inorder traversal and append values of the nodes to
        a list named arr"""
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        """Return the kth smallest element in a binary search tree"""
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder(curr_node: Node | None) -> list[Node]:
    """
    postOrder (left, right, self)
    """
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
    # See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
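# Illustrative usage of BartConfig (a sketch, not part of the original file):
# >>> config = BartConfig()      # defaults in the style of facebook/bart-large
# >>> config.hidden_size         # mapped to d_model via attribute_map
# 1024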
"""
Take in a negative integer 'number' and return its two's complement
representation as a binary string.
"""


def twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
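    # Illustrative checks (a sketch, not original doctests):
    print(twos_complement(-5))  # '0b1011'
    print(twos_complement(-1))  # '0b11'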
"""Export a BART model with beam search to ONNX and validate it against PyTorch."""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="cpu" ) -> int:
'''simple docstring'''
snake_case : Dict = model_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
snake_case : Any = tokenizer_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE__ )
if model_name in ["facebook/bart-base"]:
snake_case : Dict = 0
snake_case : Optional[Any] = None
snake_case : int = 0
return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
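# Illustrative invocation (the script filename is a placeholder; the flags are
# the ones defined in parse_args above):
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#       --num_beams 4 --max_length 5 --output_file_path bart_beam_search.onnx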
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [
                tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]
            ]

            # fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]
            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
'''simple docstring'''
from __future__ import annotations
def p_series(nth_term: int | str, power: int | str) -> list[str]:
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
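# Illustrative result: the first term is always "1", after which each term is
# 1 / (n ** power):
#   >>> p_series(5, 2)
#   ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']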
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(
    resistance: float, reactance: float, impedance: float
) -> dict[str, float]:
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
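# Worked example: with R = 3 ohm and X = 4 ohm the impedance follows
# Z = sqrt(R**2 + X**2) = 5 ohm:
#   >>> electrical_impedance(3, 4, 0)
#   {'impedance': 5.0}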
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("""DONE ✅""")
def climb_stairs(number_of_steps: int) -> int:
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
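# The recurrence behind the loop is ways(n) = ways(n - 1) + ways(n - 2), i.e.
# the Fibonacci numbers, computed in O(n) time and O(1) space:
#   >>> climb_stairs(3)
#   3
#   >>> climb_stairs(4)
#   5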
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}
SPIECE_UNDERLINE = "▁"


class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        # and is included in the raw text; there should be a match in a
        # non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
"""simple docstring"""
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't take this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util

        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
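# Illustrative expectation (hand-computed): with persons 0, 1, 2 able to take
# tasks {1, 3, 4}, {1, 2, 5} and {3, 4} respectively, there are 10 ways to
# give every person a distinct task, so the script should print 10.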
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split

        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
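# Illustrative invocation via python-fire (the script filename is a placeholder):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en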
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()

            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")
    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")
    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img
if __name__ == "__main__":
# read original image
    img = imread("image_data/lena.jpg", 1)
# convert to its negative
    img = convert_to_negative(img)
# show result image
imshow("negative of original image", img)
waitKey(0)
destroyAllWindows()
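# Note: the nested loop above is easy to follow but slow. Since the image is a
# NumPy array, the same negative can be computed in one vectorized step:
#   img = 255 - img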
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
    args = parser.parse_args()
main(args)
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_vision_text_dual_encoder': ['VisionTextDualEncoderConfig'],
'processing_vision_text_dual_encoder': ['VisionTextDualEncoderProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False

        if self.degree != polynomial_2.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
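# Illustrative usage: coefficients[i] is the coefficient of x**i, so
# Polynomial(2, [1, 2, 3]) represents 3x^2 + 2x + 1:
#   >>> p = Polynomial(2, [1, 2, 3])
#   >>> p.evaluate(2)
#   17
#   >>> str(p.derivative())
#   '6x + 2'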
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
"google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
"google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)
    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
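# The two read-only properties above are derived from block_sizes; with the
# default block_sizes=[4, 4, 4]:
#   >>> config = FunnelConfig()
#   >>> config.num_hidden_layers, config.num_blocks
#   (12, 3)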
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
A : Optional[int] = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """simple docstring"""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
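# Illustrative sketch with assumed values (not part of the original metric
# file): the per-class areas above come from `np.histogram`, which counts how
# many pixels fall into each class bin in one vectorized call. `np` is already
# imported at the top of this file.
_pred = np.array([0, 0, 1, 1, 2])
_gt = np.array([0, 1, 1, 1, 2])
_num_labels = 3
_intersect = _pred[_pred == _gt]  # pixels where prediction matches ground truth
_area_intersect = np.histogram(_intersect, bins=_num_labels, range=(0, _num_labels - 1))[0]
# class 0 has 1 correct pixel, class 1 has 2, class 2 has 1
assert _area_intersect.tolist() == [1, 2, 1]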
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """simple docstring"""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels)
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    """simple docstring"""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels)
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    """simple docstring"""
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ),
"references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ),
} ) , reference_urls=[
"https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
] , )
    def _compute(self, predictions, references, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
        '''simple docstring'''
        iou_result = mean_iou(
            results=predictions, gt_seg_maps=references, num_labels=num_labels, ignore_index=ignore_index,
            nan_to_num=nan_to_num, label_map=label_map, reduce_labels=reduce_labels)
        return iou_result
| 184
|
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight
def build_menu(name, value, weight):
    '''simple docstring'''
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu
def greedy(item, max_cost, key_func):
    '''simple docstring'''
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def test_greedy():
    '''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
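# Usage sketch (sample data assumed, not in the original module): build a menu
# and greedily fill a 100-unit budget, ranking items by raw value.
_names = ["Burger", "Pizza", "Coca Cola", "Rice"]
_values = [80.0, 100.0, 60.0, 70.0]
_weights = [40.0, 60.0, 40.0, 70.0]
_foods = build_menu(_names, _values, _weights)
_chosen, _total = greedy(_foods, 100.0, Things.get_value)
print(_chosen, _total)  # Pizza (60) and Burger (40) fit the budget: total value 180.0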
| 285
| 0
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCamelCase_ : Dict = False
class a__ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self) -> None:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self) -> None:
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt='first prompt', image=init_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=2, output_type='numpy', ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt='first prompt', image=init_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=2, output_type='numpy', ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self) -> None:
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = 'cyberpunk 2077'
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=50, output_type='numpy', ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
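# Standalone sketch (illustrative, not part of the test file) of the property
# the save/load test above relies on: re-seeding the same torch Generator
# rewinds it, so it reproduces the exact noise sequence.
import torch

_gen = torch.manual_seed(0)
_a = torch.randn(3, generator=_gen)
_gen = _gen.manual_seed(0)  # rewind to the same state
_b = torch.randn(3, generator=_gen)
assert torch.equal(_a, _b)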
| 367
|
def bin_to_octal(bin_string: str) -> str:
    if not all(char in '01' for char in bin_string):
        raise ValueError('Non-binary value was passed to the function')
    if not bin_string:
        raise ValueError('Empty string was passed to the function')
    oct_string = ''
    while len(bin_string) % 3 != 0:
        bin_string = '0' + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
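# Quick sanity check (illustrative, not in the original file): each group of
# 3 bits maps to one octal digit, after left-padding to a multiple of 3.
assert bin_to_octal("111111") == "77"   # "111" -> 7, "111" -> 7
assert bin_to_octal("1111") == "17"     # padded to "001111" first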
| 197
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self):
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
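# Illustrative shape check (token ids below are placeholders, not the real
# vocabulary): for a BART/RoBERTa-style tokenizer the pair layout produced by
# `build_inputs_with_special_tokens` is  <s> A </s> </s> B </s>.
_bos, _eos = 0, 2
_a_ids, _b_ids = [10, 11], [20]
_single = [_bos] + _a_ids + [_eos]
_pair = _single + [_eos] + _b_ids + [_eos]
assert _pair == [0, 10, 11, 2, 2, 20, 2]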
| 103
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
    "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
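# Minimal sketch of the lazy-import pattern used above (simplified and assumed,
# not the real `_LazyModule`): attribute access triggers the submodule import
# only when a name is first requested, keeping `import transformers`-style
# imports cheap.
import importlib


class _LazySketch:
    def __init__(self, package, import_structure):
        self._package = package
        # map each exported name to the submodule that defines it
        self._name_to_module = {
            name: module for module, names in import_structure.items() for name in names
        }

    def __getattr__(self, name):
        module = importlib.import_module("." + self._name_to_module[name], self._package)
        return getattr(module, name)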
| 69
| 0
|
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(fnc: Callable[[int | float], int | float], x_start: int | float, x_end: int | float, steps: int = 100, ) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print('f(x) = sin(10 * x)')
    print('The length of the curve from x = -10 to x = 10 is:')
    i = 10
    while i <= 100_000:
        print(f"""With {i} steps: {line_length(f, -10, 10, i)}""")
        i *= 10
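# Sanity check (illustrative, not in the original file): for the straight line
# f(x) = x from 0 to 1, the polyline approximation recovers sqrt(2) exactly,
# regardless of the step count.
assert abs(line_length(lambda x: x, 0, 1, 10) - math.sqrt(2)) < 1e-9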
| 368
|
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks):
    qts = tuple((re.compile(x + "$") for x in qs))
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
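# Illustrative check (the parameter path is an assumed example): `_match`
# slides the compiled regex window over the flattened parameter key, so a rule
# like ("mlp", "c_fc", "kernel") matches any key containing that suffix run.
assert _match(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_fc", "kernel"))
assert not _match(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "attn", "c_fc", "kernel"))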
| 23
| 0
|
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """simple docstring"""
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """simple docstring"""
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """simple docstring"""
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    """simple docstring"""
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data = Path(__file__).parent.joinpath(filename).read_text(encoding='utf-8')
    ciphertext = [int(number) for number in data.strip().split(',')]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F'''{solution() = }''')
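# Illustrative property check (not part of the Project Euler solution itself):
# XOR is its own inverse, so applying the same cycled key twice round-trips,
# which is what `try_key` exploits when searching the key space.
_plain = [ord(c) for c in "the text"]
_key = [ord(c) for c in "abc"]
_cipher = [p ^ k for p, k in zip(_plain, cycle(_key))]
assert [c ^ k for c, k in zip(_cipher, cycle(_key))] == _plain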
| 39
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    '''simple docstring'''

    def __init__(self, unet, scheduler) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, num_inference_steps: int = 100, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, audio_length_in_s: Optional[float] = None, return_dict: bool = True, ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}.")

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process.")
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators.")

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
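# Worked example of the sample-size rounding above (numbers are assumed for
# illustration): with sample_rate 16000 and 3 up-blocks, the down-scale factor
# is 2**3 = 8, so a request for 16001 samples is padded up to the next multiple
# of 8 before denoising, then trimmed back to the original length.
_sample_rate, _down_scale_factor = 16000, 2 ** 3
_requested = int(1.0001 * _sample_rate)  # 16001 samples, not a multiple of 8
_padded = (_requested // _down_scale_factor + 1) * _down_scale_factor
assert _padded == 16008 and _padded % _down_scale_factor == 0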
| 238
| 0
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import (
        AutoProcessor,
        BertTokenizerFast,
        BlipImageProcessor,
        GPT2Tokenizer,
        InstructBlipProcessor,
        PreTrainedTokenizerFast,
    )
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    '''simple docstring'''

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor(), qformer_tokenizer=self.get_qformer_tokenizer(), )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, BertTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()), ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"], )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()), ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"], )
| 177
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    '''simple docstring'''

    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
| 177
| 1
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine"


def get_user_input():
    '''simple docstring'''
    compute_environment = _ask_options(
        "In which compute environment are you running?", ["This machine", "AWS (Amazon SageMaker)"], _convert_compute_environment, )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file", default=None, help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ), )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    '''simple docstring'''
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"""accelerate configuration saved at {config_file}""")


def main():
    '''simple docstring'''
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
| 1
|
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    '''simple docstring'''

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        }, )
@dataclass
class DataTrainingArguments:
    '''simple docstring'''

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}, )
    max_seq_length: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    pad_to_max_length: bool = field(
        default=False, metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        }, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        }, )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    '''simple docstring'''

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
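# Shape sketch (sizes are assumed for illustration): the collator above
# flattens batch_size x num_choices examples into one padded tokenizer call,
# then views the result back. `torch` is already imported at the top.
_batch_size, _num_choices, _seq_len = 2, 4, 8
_flat = torch.zeros(_batch_size * _num_choices, _seq_len)
_unflat = _flat.view(_batch_size, _num_choices, -1)
assert _unflat.shape == (_batch_size, _num_choices, _seq_len)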
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome.")
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch.")

    # Set seed before initializing model.
    set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag", "regular", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`.")
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f" model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.")
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences, second_sentences, truncation=True, max_length=max_seq_length, padding="max_length" if data_args.pad_to_max_length else False, )

        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 11
| 0
|
"""simple docstring"""
from jiwer import compute_measures
import datasets
__A = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
__A = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
__A = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            codebase_urls=['https://github.com/jitsi/jiwer/'],
            reference_urls=[
                'https://en.wikipedia.org/wiki/Word_error_rate',
            ],
        )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        '''simple docstring'''
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
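# A minimal from-scratch sketch (hypothetical helper, not part of this metric or of
# jiwer) showing how the S/D/I counts in the WER formula above fall out of a
# word-level Levenshtein alignment between reference and prediction.
def _wer_from_scratch(reference: str, prediction: str) -> float:
    ref, hyp = reference.split(), prediction.split()
    # dp[i][j] = minimum edits (S + D + I) turning ref[:i] into hyp[:j]
    dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dp[i][0] = i  # i deletions
    for j in range(len(hyp) + 1):
        dp[0][j] = j  # j insertions
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1  # hit or substitution
            dp[i][j] = min(dp[i - 1][j] + 1, dp[i][j - 1] + 1, dp[i - 1][j - 1] + cost)
    return dp[len(ref)][len(hyp)] / len(ref)  # (S + D + I) / N
# _wer_from_scratch("this is the reference", "this is the prediction") == 0.25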
| 254
|
"""simple docstring"""
from pathlib import Path
import fire
def minify(src_path: str, dest_path: str, n: int) -> None:
    """Write the first n lines of each file under src_path to dest_path."""
    src_dir = Path(src_path)
    dest_dir = Path(dest_path)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open('w').write('\n'.join(new))
if __name__ == "__main__":
fire.Fire(minify)
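# Usage sketch (hypothetical paths): `python minify.py ./wmt_en_ro ./wmt_en_ro_small 100`
# keeps only the first 100 lines of every file, which is handy for smoke tests.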
| 254
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    '''simple docstring'''
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    '''simple docstring'''
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    '''simple docstring'''
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    '''simple docstring'''
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    '''simple docstring'''
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    '''simple docstring'''
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    '''simple docstring'''
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    '''simple docstring'''
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    '''simple docstring'''
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> list[Any]:
    '''simple docstring'''
    if root is None:
        return []
    output: list[Any] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    '''simple docstring'''
    tree = make_tree()
    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")
    print(f"Height of Tree: {height(tree)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(tree))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
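# Expected output for the sample tree above (1 with children 2 and 3; 2 with children 4 and 5):
# In-order: [4, 2, 5, 1, 3]; Pre-order: [1, 2, 4, 5, 3]; Post-order: [4, 5, 2, 3, 1]
# Height: 3; Level order: [1, 2, 3, 4, 5]; ZigZag: [[1], [3, 2], [4, 5]]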
| 57
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    '''simple docstring'''
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_points: list[int]) -> Callable[[int], int]:
    '''simple docstring'''
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs: Matrix = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size))

    return interpolated_func


def question_function(variable: int) -> int:
    '''simple docstring'''
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    '''simple docstring'''
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
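# Worked example from the Project Euler 101 statement: for the cubic u(n) = n**3,
# the optimum polynomials and their first incorrect terms (FITs) are
#   OP(1, n) = 1              -> FIT = 1
#   OP(2, n) = 7n - 6         -> FIT = 15
#   OP(3, n) = 6n^2 - 11n + 6 -> FIT = 58
# so solution(lambda n: n ** 3, 3) returns 1 + 15 + 58 == 74.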
if __name__ == "__main__":
print(F"""{solution() = }""")
| 197
| 0
|
"""simple docstring"""
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
UpperCAmelCase_ = get_logger(__name__)
class VerificationMode(enum.Enum):
    '''simple docstring'''
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    '''simple docstring'''


class UnexpectedDownloadedFile(ChecksumVerificationException):
    '''simple docstring'''


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    '''simple docstring'''


class NonMatchingChecksumError(ChecksumVerificationException):
    '''simple docstring'''


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None) -> None:
    if expected_checksums is None:
        logger.info('Unable to verify checksums.')
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = ' for ' + verification_name if verification_name is not None else ''
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error")
    logger.info('All the checksums matched successfully' + for_verification_name)


class SplitsVerificationException(Exception):
    '''simple docstring'''


class UnexpectedSplits(SplitsVerificationException):
    '''simple docstring'''


class ExpectedMoreSplits(SplitsVerificationException):
    '''simple docstring'''


class NonMatchingSplitsSizesError(SplitsVerificationException):
    '''simple docstring'''


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict) -> None:
    if expected_splits is None:
        logger.info('Unable to verify splits sizes.')
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {'expected': expected_splits[name], 'recorded': recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info('All the splits matched successfully.')


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    if record_checksum:
        m = sha256()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(1 << 20), b''):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size) -> bool:
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
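# Usage sketch (hypothetical values): entries are compared URL by URL, so a single
# differing digest raises NonMatchingChecksumError, while missing or extra URLs
# raise ExpectedMoreDownloadedFiles / UnexpectedDownloadedFile respectively.
# verify_checksums(
#     expected_checksums={"https://host/f.txt": {"num_bytes": 4, "checksum": "aa"}},
#     recorded_checksums={"https://host/f.txt": {"num_bytes": 4, "checksum": "aa"}},
# )  # logs "All the checksums matched successfully"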
| 359
|
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # Dummy iterable dataset that yields between 0 and max_length items, stopping
    # at random with probability p_stop after each item.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            # The lengths of the shards must match the expected ones.
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
# Check the shards when the dataset is a round multiple of total batch size.
UpperCamelCase__ : Any = BatchSampler(range(24 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__ )
UpperCamelCase__ : List[str] = BatchSampler(range(24 ), batch_size=3, drop_last=__magic_name__ )
# Expected shouldn't change
self.check_batch_sampler_shards(__magic_name__, __magic_name__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
UpperCamelCase__ : Optional[int] = BatchSampler(range(21 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__ )
UpperCamelCase__ : List[str] = BatchSampler(range(21 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
UpperCamelCase__ : int = BatchSampler(range(22 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__ )
UpperCamelCase__ : Any = BatchSampler(range(22 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
UpperCamelCase__ : Union[str, Any] = BatchSampler(range(20 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__ )
UpperCamelCase__ : str = BatchSampler(range(20 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__ )
# Check the shards when the dataset is very small.
UpperCamelCase__ : Optional[int] = BatchSampler(range(2 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : int = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(__magic_name__, __magic_name__ )
UpperCamelCase__ : Optional[int] = BatchSampler(range(2 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : List[Any] = [[], []]
self.check_batch_sampler_shards(__magic_name__, __magic_name__ )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
# Check the shards when the dataset is a round multiple of batch size.
UpperCamelCase__ : List[str] = BatchSampler(range(24 ), batch_size=4, drop_last=__magic_name__ )
UpperCamelCase__ : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__ )
UpperCamelCase__ : List[Any] = BatchSampler(range(24 ), batch_size=4, drop_last=__magic_name__ )
# Expected shouldn't change
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__ )
# Check the shards when the dataset is not a round multiple of batch size.
UpperCamelCase__ : Tuple = BatchSampler(range(22 ), batch_size=4, drop_last=__magic_name__ )
UpperCamelCase__ : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__ )
UpperCamelCase__ : Dict = BatchSampler(range(22 ), batch_size=4, drop_last=__magic_name__ )
UpperCamelCase__ : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
UpperCamelCase__ : Dict = BatchSampler(range(21 ), batch_size=4, drop_last=__magic_name__ )
UpperCamelCase__ : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__ )
UpperCamelCase__ : Tuple = BatchSampler(range(21 ), batch_size=4, drop_last=__magic_name__ )
UpperCamelCase__ : Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__ )
# Check the shards when the dataset is very small.
UpperCamelCase__ : List[str] = BatchSampler(range(2 ), batch_size=4, drop_last=__magic_name__ )
UpperCamelCase__ : str = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__ )
UpperCamelCase__ : Optional[Any] = BatchSampler(range(2 ), batch_size=4, drop_last=__magic_name__ )
UpperCamelCase__ : Tuple = [[], []]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__ )
def UpperCamelCase__ ( self ) -> Tuple:
"""simple docstring"""
# Check the shards when the dataset is a round multiple of total batch size.
UpperCamelCase__ : List[Any] = BatchSampler(range(24 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, even_batches=__magic_name__ )
UpperCamelCase__ : Optional[int] = BatchSampler(range(24 ), batch_size=3, drop_last=__magic_name__ )
# Expected shouldn't change
self.check_batch_sampler_shards(__magic_name__, __magic_name__, even_batches=__magic_name__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
UpperCamelCase__ : Tuple = BatchSampler(range(21 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, even_batches=__magic_name__ )
UpperCamelCase__ : Optional[Any] = BatchSampler(range(21 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, even_batches=__magic_name__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
UpperCamelCase__ : Union[str, Any] = BatchSampler(range(22 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, even_batches=__magic_name__ )
UpperCamelCase__ : int = BatchSampler(range(22 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, even_batches=__magic_name__ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
UpperCamelCase__ : List[Any] = BatchSampler(range(20 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, even_batches=__magic_name__ )
UpperCamelCase__ : Optional[int] = BatchSampler(range(20 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, even_batches=__magic_name__ )
# Check the shards when the dataset is very small.
UpperCamelCase__ : List[Any] = BatchSampler(range(2 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : Dict = [[[0, 1]], []]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, even_batches=__magic_name__ )
UpperCamelCase__ : Optional[Any] = BatchSampler(range(2 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : Tuple = [[], []]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, even_batches=__magic_name__ )
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
# Check the shards when the dataset is a round multiple of batch size.
UpperCamelCase__ : int = BatchSampler(range(24 ), batch_size=4, drop_last=__magic_name__ )
UpperCamelCase__ : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__, even_batches=__magic_name__ )
UpperCamelCase__ : Dict = BatchSampler(range(24 ), batch_size=4, drop_last=__magic_name__ )
# Expected shouldn't change
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__, even_batches=__magic_name__ )
# Check the shards when the dataset is not a round multiple of batch size.
UpperCamelCase__ : List[Any] = BatchSampler(range(22 ), batch_size=4, drop_last=__magic_name__ )
UpperCamelCase__ : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__, even_batches=__magic_name__ )
UpperCamelCase__ : Optional[int] = BatchSampler(range(22 ), batch_size=4, drop_last=__magic_name__ )
UpperCamelCase__ : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__, even_batches=__magic_name__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
UpperCamelCase__ : Any = BatchSampler(range(21 ), batch_size=4, drop_last=__magic_name__ )
UpperCamelCase__ : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__, even_batches=__magic_name__ )
UpperCamelCase__ : Tuple = BatchSampler(range(21 ), batch_size=4, drop_last=__magic_name__ )
UpperCamelCase__ : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__, even_batches=__magic_name__ )
# Check the shards when the dataset is very small.
UpperCamelCase__ : str = BatchSampler(range(2 ), batch_size=4, drop_last=__magic_name__ )
UpperCamelCase__ : Optional[Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__, even_batches=__magic_name__ )
UpperCamelCase__ : Dict = BatchSampler(range(2 ), batch_size=4, drop_last=__magic_name__ )
UpperCamelCase__ : int = [[], []]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__, even_batches=__magic_name__ )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
UpperCamelCase__ : Optional[Any] = [BatchSamplerShard(__magic_name__, 2, __magic_name__, even_batches=__magic_name__ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ), 3 )
self.assertEqual(len(batch_sampler_shards[1] ), 2 )
self.assertListEqual(list(batch_sampler_shards[0] ), [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ), [[3, 4], [9, 10, 11]] )
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))
        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)
        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
| 247
| 0
|
def fizz_buzz(number: int, iterations: int) -> str:
    if not isinstance(iterations, int):
        raise ValueError('iterations must be defined as integers')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError('starting number must be an integer and be more than 0')
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz')
    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += " "
    return out
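# Example: fizz_buzz(1, 7) returns '1 2 Fizz 4 Buzz Fizz 7 ' (a trailing space is
# appended after every entry).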
if __name__ == "__main__":
import doctest
doctest.testmod()
| 62
|
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
"""simple docstring"""
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                '''hf-internal-testing/tiny-stable-diffusion-pipe''', safety_checker=None, cache_dir=tmpdirname
            )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], '''snapshots'''))]
            files = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith('''.bin''') for f in files)
@slow
@require_flax
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
"""simple docstring"""
def A ( self : List[str] ) -> Dict:
UpperCAmelCase , UpperCAmelCase : str = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=__snake_case )
UpperCAmelCase : List[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCAmelCase : List[str] = jax.random.PRNGKey(0 )
UpperCAmelCase : Optional[Any] = 4
UpperCAmelCase : Optional[Any] = jax.device_count()
UpperCAmelCase : Tuple = num_samples * [prompt]
UpperCAmelCase : int = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
UpperCAmelCase : Tuple = replicate(__snake_case )
UpperCAmelCase : Any = jax.random.split(__snake_case , __snake_case )
UpperCAmelCase : Optional[Any] = shard(__snake_case )
UpperCAmelCase : Optional[int] = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1E-3
assert np.abs(np.abs(__snake_case , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5E-1
UpperCAmelCase : Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(__snake_case ) == num_samples
def A ( self : List[Any] ) -> List[str]:
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=__snake_case )
UpperCAmelCase : Dict = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCAmelCase : Optional[Any] = jax.random.PRNGKey(0 )
UpperCAmelCase : Any = 50
UpperCAmelCase : Union[str, Any] = jax.device_count()
UpperCAmelCase : int = num_samples * [prompt]
UpperCAmelCase : Union[str, Any] = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
UpperCAmelCase : Dict = replicate(__snake_case )
UpperCAmelCase : int = jax.random.split(__snake_case , __snake_case )
UpperCAmelCase : Tuple = shard(__snake_case )
UpperCAmelCase : Tuple = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1E-3
assert np.abs((np.abs(__snake_case , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5E-1
def A ( self : int ) -> Dict:
UpperCAmelCase , UpperCAmelCase : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__snake_case )
UpperCAmelCase : Dict = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCAmelCase : Union[str, Any] = jax.random.PRNGKey(0 )
UpperCAmelCase : List[str] = 50
UpperCAmelCase : Union[str, Any] = jax.device_count()
UpperCAmelCase : List[Any] = num_samples * [prompt]
UpperCAmelCase : int = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
UpperCAmelCase : Tuple = replicate(__snake_case )
UpperCAmelCase : List[Any] = jax.random.split(__snake_case , __snake_case )
UpperCAmelCase : Optional[int] = shard(__snake_case )
UpperCAmelCase : Optional[Any] = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(__snake_case , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def A ( self : int ) -> Any:
UpperCAmelCase , UpperCAmelCase : Dict = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa )
UpperCAmelCase : List[str] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCAmelCase : List[str] = jax.random.PRNGKey(0 )
UpperCAmelCase : Union[str, Any] = 50
UpperCAmelCase : Optional[int] = jax.device_count()
UpperCAmelCase : List[str] = num_samples * [prompt]
UpperCAmelCase : Dict = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
UpperCAmelCase : Tuple = replicate(__snake_case )
UpperCAmelCase : Any = jax.random.split(__snake_case , __snake_case )
UpperCAmelCase : str = shard(__snake_case )
UpperCAmelCase : Optional[int] = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(__snake_case , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def A ( self : Tuple ) -> Optional[Any]:
UpperCAmelCase : int = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , set_alpha_to_one=__snake_case , steps_offset=1 , )
UpperCAmelCase , UpperCAmelCase : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=__snake_case , safety_checker=__snake_case , )
UpperCAmelCase : Tuple = scheduler.create_state()
UpperCAmelCase : Dict = scheduler_state
UpperCAmelCase : str = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCAmelCase : int = jax.random.PRNGKey(0 )
UpperCAmelCase : Union[str, Any] = 50
UpperCAmelCase : Optional[Any] = jax.device_count()
UpperCAmelCase : Any = num_samples * [prompt]
UpperCAmelCase : Dict = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
UpperCAmelCase : str = replicate(__snake_case )
UpperCAmelCase : List[str] = jax.random.split(__snake_case , __snake_case )
UpperCAmelCase : Optional[int] = shard(__snake_case )
UpperCAmelCase : Dict = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1E-3
assert np.abs((np.abs(__snake_case , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5E-1
def A ( self : Any ) -> Tuple:
UpperCAmelCase : List[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCAmelCase : Union[str, Any] = jax.device_count()
UpperCAmelCase : List[Any] = num_samples * [prompt]
UpperCAmelCase : str = jax.random.split(jax.random.PRNGKey(0 ) , __snake_case )
UpperCAmelCase , UpperCAmelCase : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__snake_case , )
UpperCAmelCase : Dict = replicate(__snake_case )
UpperCAmelCase : Optional[Any] = pipeline.prepare_inputs(__snake_case )
UpperCAmelCase : List[str] = shard(__snake_case )
UpperCAmelCase : Any = pipeline(__snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
UpperCAmelCase : Optional[int] = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
UpperCAmelCase , UpperCAmelCase : Any = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__snake_case , use_memory_efficient_attention=__snake_case , )
UpperCAmelCase : int = replicate(__snake_case )
UpperCAmelCase : int = pipeline.prepare_inputs(__snake_case )
UpperCAmelCase : List[Any] = shard(__snake_case )
UpperCAmelCase : Optional[Any] = pipeline(__snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
UpperCAmelCase : int = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
| 23
| 0
|
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)
        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f'''-{len(sharded_state_dicts)+1:05d}-of-???.bin''')
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f'''-{len(sharded_state_dicts)+1:05d}-of-???.bin'''))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())
    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f'''-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin'''
        )
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f'''-{idx+1:05d}-of-???.bin'''))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)
    return metadata, index
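# The index written above follows the standard sharded-checkpoint layout, e.g.
# {"metadata": {"total_size": 123456},
#  "weight_map": {"<parameter name>": "pytorch_model-00001-of-00072.bin", ...}}
# (the parameter name and shard count here are illustrative only).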
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
args = parser.parse_args()
shard_on_the_fly(
    args.switch_t5x_checkpoint_path,
    args.pytorch_dump_folder_path,
    args.max_shard_size,
    args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 341
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
    'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_m2m_100'] = [
        'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
        'M2M100ForConditionalGeneration',
        'M2M100Model',
        'M2M100PreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
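# With this pattern, importing transformers.models.m2m_100 stays cheap: _LazyModule
# replaces the module in sys.modules and defers importing the torch-backed
# submodules until an attribute such as M2M100Model is first accessed.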
| 341
| 1
|
"""simple docstring"""
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    arr = [int(item) for item in user_input.split(''',''')]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
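# Example (sorts in place): data = [5, 2, 9, 1]; quick_sort_random(data, 0, len(data))
# leaves data == [1, 2, 5, 9] regardless of which pivots the RNG happens to pick.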
| 177
|
"""simple docstring"""
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1_0_0_0) -> int:
    return fibonacci_digits_index(n)
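# Example: solution(3) == 12, since fibonacci(12) == 144 is the first value in this
# sequence (fibonacci(1) == 0, fibonacci(2) == 1, ...) with three digits.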
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 177
| 1
|
def solution(n: int = 60_08_51_47_51_43) -> int:
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
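# Example (from the Project Euler 3 statement): solution(13195) == 29, since the
# prime factors of 13195 are 5, 7, 13 and 29.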
if __name__ == "__main__":
print(F'{solution() = }')
| 180
|
from manim import *
class a__ ( UpperCamelCase__ ):
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
a = Rectangle(height=0.5 , width=0.5 )
a = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
a = Rectangle(height=0.2_5 , width=0.2_5 )
a = [mem.copy() for i in range(6 )]
a = [mem.copy() for i in range(6 )]
a = VGroup(*A ).arrange(A , buff=0 )
a = VGroup(*A ).arrange(A , buff=0 )
a = VGroup(A , A ).arrange(A , buff=0 )
a = Text("CPU" , font_size=24 )
a = Group(A , A ).arrange(A , buff=0.5 , aligned_edge=A )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A )
a = [mem.copy() for i in range(4 )]
a = VGroup(*A ).arrange(A , buff=0 )
a = Text("GPU" , font_size=24 )
a = Group(A , A ).arrange(A , buff=0.5 , aligned_edge=A )
gpu.move_to([-1, -1, 0] )
self.add(A )
a = [mem.copy() for i in range(6 )]
a = VGroup(*A ).arrange(A , buff=0 )
a = Text("Model" , font_size=24 )
a = Group(A , A ).arrange(A , buff=0.5 , aligned_edge=A )
model.move_to([3, -1.0, 0] )
self.add(A )
a = []
a = []
for i, rect in enumerate(A ):
a = fill.copy().set_fill(A , opacity=0.8 )
target.move_to(A )
model_arr.append(A )
a = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(A , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(A )
self.add(*A , *A )
a = [meta_mem.copy() for i in range(6 )]
a = [meta_mem.copy() for i in range(6 )]
a = VGroup(*A ).arrange(A , buff=0 )
a = VGroup(*A ).arrange(A , buff=0 )
a = VGroup(A , A ).arrange(A , buff=0 )
a = Text("Disk" , font_size=24 )
a = Group(A , A ).arrange(A , buff=0.5 , aligned_edge=A )
disk.move_to([-4, -1.2_5, 0] )
self.add(A , A )
a = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
a = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(A , A )
a = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(A , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(A )
a = MarkupText(
F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A ) )
a = Square(0.3 )
input.set_fill(A , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , A , buff=0.5 )
self.play(Write(A ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=A , buff=0.0_2 )
self.play(MoveToTarget(A ) )
self.play(FadeOut(A ) )
a = Arrow(start=A , end=A , color=A , buff=0.5 )
a.next_to(model_arr[0].get_left() , A , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
a = MarkupText(
F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A , run_time=3 ) )
a = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.0_2}
self.play(
Write(A ) , Circumscribe(model_arr[0] , color=A , **A ) , Circumscribe(model_cpu_arr[0] , color=A , **A ) , Circumscribe(gpu_rect[0] , color=A , **A ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
a = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.0_2 , A , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.0_2 )
a = AnimationGroup(
FadeOut(A , run_time=0.5 ) , MoveToTarget(A , run_time=0.5 ) , FadeIn(A , run_time=0.5 ) , lag_ratio=0.2 )
self.play(A )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
a = 0.7
self.play(
Circumscribe(model_arr[i] , **A ) , Circumscribe(cpu_left_col_base[i] , **A ) , Circumscribe(cpu_left_col_base[i + 1] , color=A , **A ) , Circumscribe(gpu_rect[0] , color=A , **A ) , Circumscribe(model_arr[i + 1] , color=A , **A ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.0_2 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=A , **A ) , Circumscribe(cpu_left_col_base[-1] , color=A , **A ) , Circumscribe(gpu_rect[0] , color=A , **A ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
a = a_c
a = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.0_2 , buff=0.5 )
self.play(
FadeOut(A ) , FadeOut(A , run_time=0.5 ) , )
a = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(A , run_time=3 ) , MoveToTarget(A ) )
self.wait()
| 180
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _A(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __A ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __A ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class _A(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __A ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __A ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class _A(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __A ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __A ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class _A(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __A ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __A ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class _A(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
'''simple docstring'''
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __A ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __A ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class _A(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __A ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def __A ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
| 254
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''unc-nlp/lxmert-base-uncased''': (
'''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''unc-nlp/lxmert-base-uncased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        '''simple docstring'''
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
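

# A quick, self-contained illustration of the special-token layout the two
# methods above produce for a sequence pair. The ids below are made-up
# placeholders, not real vocabulary entries.
_cls, _sep = 101, 102
_ids_0, _ids_1 = [7, 8], [9]
assert [_cls] + _ids_0 + [_sep] + _ids_1 + [_sep] == [101, 7, 8, 102, 9, 102]
assert len([_cls] + _ids_0 + [_sep]) * [0] + len(_ids_1 + [_sep]) * [1] == [0, 0, 0, 0, 1, 1]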
| 254
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE__ = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
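
# Minimal sketch of the lazy-module pattern used above (an illustration, not
# the actual transformers._LazyModule implementation): attribute access
# triggers the real submodule import on first use.
#
#     import importlib
#
#     class _LazyModuleSketch:
#         def __init__(self, name, import_structure):
#             self._name = name
#             self._import_structure = import_structure
#
#         def __getattr__(self, attr):
#             for submodule, names in self._import_structure.items():
#                 if attr in names:
#                     module = importlib.import_module(f".{submodule}", self._name)
#                     return getattr(module, attr)
#             raise AttributeError(attr)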
| 351
|
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """simple docstring"""

    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
"""simple docstring"""
@register_to_config
    def __init__(self, num_attention_heads=16, attention_head_dim=88, in_channels=None, out_channels=None, num_layers=1, dropout=0.0, norm_num_groups=32, cross_attention_dim=None, attention_bias=False, sample_size=None, activation_fn="geglu", norm_elementwise_affine=True, double_self_attention=True):
        '''simple docstring'''
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )
        self.proj_out = nn.Linear(inner_dim, in_channels)
    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict=True):
        '''simple docstring'''
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
        hidden_states = self.proj_in(hidden_states)
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels,
            )
        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output)
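

# Shape bookkeeping for the forward pass above (illustrative numbers):
# hidden_states arrives as (batch_frames, channel, height, width) with
# batch_frames = batch_size * num_frames, e.g. (8, 320, 32, 32) for
# batch_size=2, num_frames=4. It is reshaped to (2, 4, 320, 32, 32), permuted
# to (2, 320, 4, 32, 32) for the GroupNorm, then flattened to
# (batch_size * height * width, num_frames, channel) = (2048, 4, 320), so each
# spatial position attends over the num_frames axis; the output path inverts
# the same permutation before the residual addition.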
| 297
| 0
|
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """simple docstring"""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )
IMPORT_ERROR_MESSAGE = '''\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'''
class ConvertCommand(BaseTransformersCLICommand):
'''simple docstring'''
@staticmethod
    def register_subcommand(parser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name", type=str, default=None, help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self, model_type, tf_checkpoint, pytorch_dump_output, config, finetuning_task_name, *args):
        self._logger = logging.get_logger("transformers-cli/converting")
        self._logger.info(f"""Loading model {model_type}""")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
if "ckpt" in self._tf_checkpoint.lower():
lowerCAmelCase__ : str = self._tf_checkpoint
lowerCAmelCase__ : List[str] = """"""
else:
lowerCAmelCase__ : int = self._tf_checkpoint
lowerCAmelCase__ : List[Any] = """"""
convert_transfo_xl_checkpoint_to_pytorch(
snake_case_ ,self._config ,self._pytorch_dump_output ,snake_case_ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint ,self._config ,self._pytorch_dump_output ,self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint ,self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint ,self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
else:
raise ValueError(
"""--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]""" )
| 37
|
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)


def load_and_quantize_model(model, bnb_quantization_config, weights_location=None, device_map=None, no_split_module_classes=None, max_memory=None, offload_folder=None, offload_state_dict=False):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed.")
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed.")
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)
# add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)
# compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model)
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"It is not recommended to quantize a loaded model. "
"The model should be instantiated under the `init_empty_weights` context manager." )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization." )
logger.info(
f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
"We move the model to cuda." )
return model
    elif weights_location is None:
        raise RuntimeError(
            f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """)
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        device_map = get_quantized_model_device_map(
            model, bnb_quantization_config, device_map, max_memory=max_memory, no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])
        load_checkpoint_in_model(
            model, weights_location, device_map, dtype=bnb_quantization_config.torch_dtype, offload_folder=offload_folder, offload_state_dict=offload_state_dict, keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules, offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
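

# Hedged usage sketch for the entry point above (exposed publicly as
# accelerate.utils.load_and_quantize_model); the model class and weights path
# are placeholders:
#
#   from accelerate import init_empty_weights
#   from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
#   with init_empty_weights():
#       empty_model = MyModel()  # hypothetical model class
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#   model = load_and_quantize_model(
#       empty_model, bnb_config, weights_location="/path/to/weights", device_map="auto"
#   )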
def get_quantized_model_device_map(model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")
    if isinstance(device_map, str):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
"'sequential'." )
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )
        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
A__ = get_balanced_memory(
lowercase_ , low_zero=(device_map == "balanced_low_0") , max_memory=lowercase_ , **lowercase_ , )
A__ = max_memory
A__ = infer_auto_device_map(lowercase_ , **lowercase_ )
    if isinstance(device_map, dict):
# check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
raise ValueError(
"\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n " )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit")
del device_map_without_some_modules
return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name)
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug." )
return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features, module.out_features, module.bias is not None, has_fp16_weights=False, threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features, module.out_features, module.bias is not None, bnb_quantization_config.bnb_4bit_compute_dtype, compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant, quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name)
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)
    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)
    return filtered_module_names
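

# Example outcome (illustrative): for a causal LM whose lm_head is tied to the
# input embeddings, the helper above typically returns ["lm_head"], keeping the
# output head out of the low-bit conversion for numerical stability.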
def has_4bit_bnb_layers(model):
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False
def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"""{module} has no attribute {split}.""")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB, param_name.replace("weight", "SCB"), offload_folder, index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)
    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
| 247
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class CTRLConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__(self, vocab_size=246534, n_positions=256, n_embd=1280, dff=8192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02, use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
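

# Minimal sketch: thanks to attribute_map above, the generic config names
# resolve to the CTRL-specific attributes, e.g. with default values:
#
#   config = CTRLConfig()
#   assert config.hidden_size == config.n_embd == 1280
#   assert config.num_hidden_layers == config.n_layer == 48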
| 2
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "beit"
    def __init__(self, vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
    def atol_for_validation(self) -> float:
return 1e-4
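

# The dynamic-axes mapping above describes a single pixel_values input of
# shape (batch, num_channels, height, width); with the default config that is
# e.g. torch.randn(1, 3, 224, 224), and 1e-4 is the absolute tolerance used
# when validating ONNX outputs against the PyTorch reference.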
| 2
| 1
|
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    # key transform reconstructed best-effort; the erased left-hand side likely
    # rewrote checkpoint-style keys into module-style keys
    for k, v in current_block.items():
        new_current_block[k.replace("encoder/", "encoder.")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name=WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path)
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)
        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"""-{len(sharded_state_dicts)+1:05d}-of-???.bin"""))
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"""-{len(sharded_state_dicts)+1:05d}-of-???.bin"""))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())
    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"""-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin""")  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"""-{idx+1:05d}-of-???.bin"""))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)
    return metadata, index
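

# Shape of the index built above when more than one shard is written
# (illustrative key names):
#
#   {
#     "metadata": {"total_size": 123456789},
#     "weight_map": {
#       "encoder/block/0/.../weight": "pytorch_model-00001-of-00002.bin",
#       ...
#     }
#   }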
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto")
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 341
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
'''simple docstring'''
def __init__(self , UpperCAmelCase , UpperCAmelCase=2 , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=10 , UpperCAmelCase=3 , UpperCAmelCase=32 * 4 , UpperCAmelCase=32 * 6 , UpperCAmelCase=4 , UpperCAmelCase=32 , ) -> Optional[Any]:
_snake_case = parent
_snake_case = batch_size
_snake_case = is_training
_snake_case = use_auxiliary_loss
_snake_case = num_queries
_snake_case = num_channels
_snake_case = min_size
_snake_case = max_size
_snake_case = num_labels
_snake_case = mask_feature_size
def lowercase (self ) -> str:
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
UpperCAmelCase )
_snake_case = torch.ones([self.batch_size, self.min_size, self.max_size] , device=UpperCAmelCase )
_snake_case = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=UpperCAmelCase ) > 0.5
).float()
_snake_case = (torch.rand((self.batch_size, self.num_labels) , device=UpperCAmelCase ) > 0.5).long()
_snake_case = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase (self ) -> Tuple:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def lowercase (self ) -> Optional[Any]:
_snake_case, _snake_case, _snake_case, _snake_case, _snake_case = self.prepare_config_and_inputs()
_snake_case = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> int:
_snake_case = output.encoder_hidden_states
_snake_case = output.pixel_decoder_hidden_states
_snake_case = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(UpperCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCAmelCase ) , config.decoder_config.decoder_layers )
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> Union[str, Any]:
with torch.no_grad():
_snake_case = MaskFormerModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_snake_case = model(pixel_values=UpperCAmelCase , pixel_mask=UpperCAmelCase )
_snake_case = model(UpperCAmelCase , output_hidden_states=UpperCAmelCase )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(UpperCAmelCase , UpperCAmelCase )
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
_snake_case = MaskFormerForInstanceSegmentation(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
def comm_check_on_output(UpperCAmelCase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_snake_case = model(pixel_values=UpperCAmelCase , pixel_mask=UpperCAmelCase )
_snake_case = model(UpperCAmelCase )
comm_check_on_output(UpperCAmelCase )
_snake_case = model(
pixel_values=UpperCAmelCase , pixel_mask=UpperCAmelCase , mask_labels=UpperCAmelCase , class_labels=UpperCAmelCase )
comm_check_on_output(UpperCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
def lowercase (self ) -> int:
_snake_case = MaskFormerModelTester(self )
_snake_case = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase )
def lowercase (self ) -> int:
self.config_tester.run_common_tests()
def lowercase (self ) -> List[Any]:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCAmelCase , **UpperCAmelCase , output_hidden_states=UpperCAmelCase )
def lowercase (self ) -> Any:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*UpperCAmelCase )
@unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
def lowercase (self ) -> Optional[Any]:
pass
@unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
def lowercase (self ) -> Optional[int]:
pass
@unittest.skip(reason="""MaskFormer is not a generative model""" )
def lowercase (self ) -> int:
pass
@unittest.skip(reason="""MaskFormer does not use token embeddings""" )
def lowercase (self ) -> Optional[int]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def lowercase (self ) -> Optional[Any]:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowercase (self ) -> Tuple:
pass
def lowercase (self ) -> List[str]:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(UpperCAmelCase )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
@slow
def lowercase (self ) -> int:
for model_name in ["facebook/maskformer-swin-small-coco"]:
_snake_case = MaskFormerModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def lowercase (self ) -> Tuple:
_snake_case = (self.model_tester.min_size,) * 2
_snake_case = {
"""pixel_values""": torch.randn((2, 3, *size) , device=UpperCAmelCase ),
"""mask_labels""": torch.randn((2, 10, *size) , device=UpperCAmelCase ),
"""class_labels""": torch.zeros(2 , 10 , device=UpperCAmelCase ).long(),
}
_snake_case = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(UpperCAmelCase )
_snake_case = model(**UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
def lowercase (self ) -> Dict:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCAmelCase , **UpperCAmelCase , output_hidden_states=UpperCAmelCase )
def lowercase (self ) -> List[str]:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(UpperCAmelCase ).to(UpperCAmelCase )
_snake_case = model(**UpperCAmelCase , output_attentions=UpperCAmelCase )
self.assertTrue(outputs.attentions is not None )
def lowercase (self ) -> Tuple:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_snake_case = self.all_model_classes[1]
_snake_case, _snake_case, _snake_case, _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs()
_snake_case = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.train()
_snake_case = model(UpperCAmelCase , mask_labels=UpperCAmelCase , class_labels=UpperCAmelCase ).loss
loss.backward()
def lowercase (self ) -> List[str]:
# only MaskFormerForInstanceSegmentation has the loss
_snake_case = self.all_model_classes[1]
_snake_case, _snake_case, _snake_case, _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs()
_snake_case = True
_snake_case = True
_snake_case = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.train()
_snake_case = model(UpperCAmelCase , mask_labels=UpperCAmelCase , class_labels=UpperCAmelCase )
_snake_case = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_snake_case = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
_snake_case = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_snake_case = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=UpperCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__lowerCAmelCase = 1E-4
def __SCREAMING_SNAKE_CASE ( ):
_snake_case = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
def lowercase (self ) -> Optional[int]:
return (
MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
if is_vision_available()
else None
)
def lowercase (self ) -> str:
_snake_case = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(UpperCAmelCase )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase )
_snake_case = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
_snake_case = model(**UpperCAmelCase )
_snake_case = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
_snake_case = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
_snake_case = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
def lowercase (self ) -> List[str]:
_snake_case = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(UpperCAmelCase )
.eval()
)
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase )
_snake_case = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
_snake_case = model(**UpperCAmelCase )
# masks_queries_logits
_snake_case = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_snake_case = [
[-1.373_7124, -1.772_4937, -1.936_4233],
[-1.597_7281, -1.986_7939, -2.152_3695],
[-1.579_5398, -1.926_9832, -2.09_3942],
]
_snake_case = torch.tensor(UpperCAmelCase ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
# class_queries_logits
_snake_case = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_snake_case = torch.tensor(
[
[1.6_5_1_2e0_0, -5.2_5_7_2e0_0, -3.3_5_1_9e0_0],
[3.6_1_6_9e-0_2, -5.9_0_2_5e0_0, -2.9_3_1_3e0_0],
[1.0_7_6_6e-0_4, -7.7_6_3_0e0_0, -5.1_2_6_3e0_0],
] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
def test_inference_instance_segmentation_head_resnet_backbone (self ) -> List[Any]:
    model = (
        MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" )
        .to(torch_device )
        .eval()
    )
    image_processor = self.default_image_processor
    image = prepare_img()
    inputs = image_processor(image , return_tensors="""pt""" ).to(torch_device )
    inputs_shape = inputs["""pixel_values"""].shape
    # check size is divisible by 32
    self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
    # check size
    self.assertEqual(inputs_shape , (1, 3, 800, 1088) )
    with torch.no_grad():
        outputs = model(**inputs )
    # masks_queries_logits
    masks_queries_logits = outputs.masks_queries_logits
    self.assertEqual(
        masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
    expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
    expected_slice = torch.tensor(expected_slice ).to(torch_device )
    self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , expected_slice , atol=1E-4 ) )
    # class_queries_logits
    class_queries_logits = outputs.class_queries_logits
    self.assertEqual(
        class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
    expected_slice = torch.tensor(
        [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(torch_device )
    self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_slice , atol=1E-4 ) )
def test_with_segmentation_maps_and_loss (self ) -> Tuple:
    model = (
        MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
        .to(torch_device )
        .eval()
    )
    image_processor = self.default_image_processor
    inputs = image_processor(
        [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , )
    inputs["""pixel_values"""] = inputs["""pixel_values"""].to(torch_device )
    inputs["""mask_labels"""] = [el.to(torch_device ) for el in inputs["""mask_labels"""]]
    inputs["""class_labels"""] = [el.to(torch_device ) for el in inputs["""class_labels"""]]
    with torch.no_grad():
        outputs = model(**inputs )
    self.assertTrue(outputs.loss is not None )
| 341
| 1
|
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
logger = logging.getLogger(__name__ )
def save_model ( model , dirpath ) -> Optional[Any]:
    # save results
    if os.path.exists(dirpath ):
        if os.path.exists(os.path.join(dirpath , """config.json""" ) ) and os.path.isfile(
            os.path.join(dirpath , """config.json""" ) ):
            os.remove(os.path.join(dirpath , """config.json""" ) )
        if os.path.exists(os.path.join(dirpath , """pytorch_model.bin""" ) ) and os.path.isfile(
            os.path.join(dirpath , """pytorch_model.bin""" ) ):
            os.remove(os.path.join(dirpath , """pytorch_model.bin""" ) )
    else:
        os.makedirs(dirpath )
    model.save_pretrained(dirpath )
def entropy ( p , unlogit=False ) -> List[Any]:
    exponent = 2
    if unlogit:
        p = torch.pow(p , exponent )
    plogp = p * torch.log(p )
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1 )
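# Sanity check for entropy(): a uniform distribution p = torch.full((n,), 1.0 / n) gives
# entropy(p) == log(n), the maximum possible value; attention heads whose rows sit near that
# bound attend almost uniformly and are natural pruning candidates.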
def print_ad_tensor ( tensor ) -> Optional[Any]:
    logger.info("""lv, h >\t""" + """\t""".join(f"""{x + 1}""" for x in range(len(tensor ) ) ) )
    for row in range(len(tensor ) ):
        if tensor.dtype != torch.long:
            logger.info(f"""layer {row + 1}:\t""" + """\t""".join(f"""{x:.5f}""" for x in tensor[row].cpu().data ) )
        else:
            logger.info(f"""layer {row + 1}:\t""" + """\t""".join(f"""{x:d}""" for x in tensor[row].cpu().data ) )
def compute_heads_importance ( args , model , eval_dataloader , compute_entropy=True , compute_importance=True , head_mask=None , actually_pruned=False ) -> str:
    n_layers , n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    attn_entropy = torch.zeros(n_layers , n_heads ).to(args.device )
    head_importance = torch.zeros(n_layers , n_heads ).to(args.device )
    if head_mask is None:
        head_mask = torch.ones(n_layers , n_heads ).to(args.device )
    head_mask.requires_grad_(requires_grad=True )
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None
    total_loss = 0.0
    tot_tokens = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader , desc="""Iteration""" , disable=args.local_rank not in [-1, 0] ) ):
        inputs = tuple(t.to(args.device ) for t in inputs )
        (input_ids ,) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids , labels=input_ids , head_mask=head_mask )
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss , _ , all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions ):
                masked_entropy = entropy(attn.detach() , True )
                attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids ).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance , exponent ).sum(-1 ) , 1 / exponent )
        head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info("""Attention entropies""" )
        print_ad_tensor(attn_entropy )
    if compute_importance:
        logger.info("""Head importance scores""" )
        print_ad_tensor(head_importance )
    logger.info("""Head ranked by importance scores""" )
    head_ranks = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
    head_ranks[head_importance.view(-1 ).sort(descending=True )[1]] = torch.arange(
        head_importance.numel() , device=args.device )
    head_ranks = head_ranks.view_as(head_importance )
    print_ad_tensor(head_ranks )
    return attn_entropy, head_importance, total_loss
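# Background: the head-importance score above follows Michel et al., "Are Sixteen Heads Really
# Better than One?" (https://arxiv.org/abs/1905.10650) -- each head carries a differentiable
# mask, and importance is the accumulated absolute gradient of the loss w.r.t. that mask, so
# heads whose removal barely moves the loss score low.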
def mask_heads ( args , model , eval_dataloader ) -> List[str]:
    _ , head_importance , loss = compute_heads_importance(args , model , eval_dataloader , compute_entropy=False )
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("""Pruning: original score: %f, threshold: %f""" , original_score , original_score * args.masking_threshold )
    new_head_mask = torch.ones_like(head_importance )
    num_to_mask = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("""Inf""" )
        current_heads_to_mask = head_importance.view(-1 ).sort()[1]
        if len(current_heads_to_mask ) <= num_to_mask:
            print("""BREAK BY num_to_mask""" )
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("""Heads to mask: %s""" , str(current_heads_to_mask.tolist() ) )
        new_head_mask = new_head_mask.view(-1 )
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask )
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask )
        # Compute metric and head importance again
        _ , head_importance , loss = compute_heads_importance(
            args , model , eval_dataloader , compute_entropy=False , head_mask=new_head_mask )
        current_score = 1 / loss
        logger.info(
            """Masking: current score: %f, remaining heads %d (%.1f percents)""" , current_score , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
    logger.info("""Final head mask""" )
    print_ad_tensor(head_mask )
    np.save(os.path.join(args.output_dir , """head_mask.npy""" ) , head_mask.detach().cpu().numpy() )
    return head_mask
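# The loop above is greedy: each iteration masks the `masking_amount` fraction of heads with the
# lowest importance, re-estimates importance under the new mask, and stops once 1/loss drops
# below `masking_threshold` times the original score; the last mask that still met the threshold
# is the one returned.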
def prune_heads ( args , model , eval_dataloader , head_mask ) -> Dict:
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _ , _ , loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=head_mask )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters() )
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask ) )
    }
    for k, v in heads_to_prune.items():
        if isinstance(v , int ):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune )
    pruned_num_params = sum(p.numel() for p in model.parameters() )
    before_time = datetime.now()
    _ , _ , loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=None , actually_pruned=True , )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        """Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)""" , original_num_params , pruned_num_params , pruned_num_params / original_num_params * 100 , )
    logger.info("""Pruning: score with masking: %f score with pruning: %f""" , score_masking , score_pruning )
    logger.info("""Pruning: speed ratio (original timing / new timing): %f percents""" , original_time / new_time * 100 )
    save_model(model , args.output_dir )
def main ( ) -> Optional[int]:
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--data_dir""" , default=None , type=str , required=True , help="""The input data dir. Should contain the .tsv files (or other data files) for the task.""" , )
    parser.add_argument(
        """--model_name_or_path""" , default=None , type=str , required=True , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
    parser.add_argument(
        """--output_dir""" , default=None , type=str , required=True , help="""The output directory where the model predictions and checkpoints will be written.""" , )
    # Other parameters
    parser.add_argument(
        """--config_name""" , default="""""" , type=str , help="""Pretrained config name or path if not the same as model_name_or_path""" , )
    parser.add_argument(
        """--tokenizer_name""" , default="""""" , type=str , help="""Pretrained tokenizer name or path if not the same as model_name_or_path""" , )
    parser.add_argument(
        """--cache_dir""" , default=None , type=str , help="""Where do you want to store the pre-trained models downloaded from s3""" , )
    parser.add_argument(
        """--data_subset""" , type=int , default=-1 , help="""If > 0: limit the data to a subset of data_subset instances.""" )
    parser.add_argument(
        """--overwrite_output_dir""" , action="""store_true""" , help="""Whether to overwrite data in output directory""" )
    parser.add_argument(
        """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
    parser.add_argument(
        """--dont_normalize_importance_by_layer""" , action="""store_true""" , help="""Don't normalize importance score by layers""" )
    parser.add_argument(
        """--dont_normalize_global_importance""" , action="""store_true""" , help="""Don't normalize all importance scores between 0 and 1""" , )
    parser.add_argument(
        """--try_masking""" , action="""store_true""" , help="""Whether to try to mask head until a threshold of accuracy.""" )
    parser.add_argument(
        """--masking_threshold""" , default=0.9 , type=float , help="""masking threshold in term of metrics (stop masking when metric < threshold * original metric value).""" , )
    parser.add_argument(
        """--masking_amount""" , default=0.1 , type=float , help="""Amount to heads to masking at each masking step.""" )
    parser.add_argument("""--metric_name""" , default="""acc""" , type=str , help="""Metric to use for head masking.""" )
    parser.add_argument(
        """--max_seq_length""" , default=128 , type=int , help=(
            """The maximum total input sequence length after WordPiece tokenization. \n"""
            """Sequences longer than this will be truncated, sequences shorter padded."""
        ) , )
    parser.add_argument("""--batch_size""" , default=1 , type=int , help="""Batch size.""" )
    parser.add_argument("""--seed""" , type=int , default=42 )
    parser.add_argument("""--local_rank""" , type=int , default=-1 , help="""local_rank for distributed training on gpus""" )
    parser.add_argument("""--no_cuda""" , action="""store_true""" , help="""Whether not to use CUDA when available""" )
    parser.add_argument("""--server_ip""" , type=str , default="""""" , help="""Can be used for distant debugging.""" )
    parser.add_argument("""--server_port""" , type=str , default="""""" , help="""Can be used for distant debugging.""" )
    args = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("""Waiting for debugger attach""" )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True )
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("""cuda""" if torch.cuda.is_available() and not args.no_cuda else """cpu""" )
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        args.device = torch.device("""cuda""" , args.local_rank )
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="""nccl""" )  # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
    logger.info("""device: {} n_gpu: {}, distributed: {}""".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
    # Distributed and parallel training
    model.to(args.device )
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=True )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model )
    # Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=True )
    torch.save(args , os.path.join(args.output_dir , """run_args.bin""" ) )
    logger.info("""Training/evaluation parameters %s""" , args )
    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.intaa ),
        ] )
    train_tensor_dataset = (torch.from_numpy(numpy_data ),)
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    eval_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.batch_size )
    # Compute head entropy and importance score
    compute_heads_importance(args , model , eval_dataloader )
    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args , model , eval_dataloader )
        prune_heads(args , model , eval_dataloader , head_mask )
if __name__ == "__main__":
main()
| 370
|
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
MODEL = """base_with_context"""
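# Note on the `.T` transposes in the loaders below: T5X/Flax stores dense kernels as
# (in_features, out_features), while torch.nn.Linear weights are (out_features, in_features),
# so every kernel is transposed when copied into the PyTorch modules.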
def load_notes_encoder ( weights , model ) -> Dict:
lowerCAmelCase__ : Optional[int] = nn.Parameter(torch.FloatTensor(weights["""token_embedder"""]["""embedding"""] ) )
lowerCAmelCase__ : int = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=__UpperCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
ly_weight = weights[f"""layers_{lyr_num}"""]
lowerCAmelCase__ : Union[str, Any] = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
attention_weights = ly_weight["""attention"""]
lowerCAmelCase__ : str = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
lowerCAmelCase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
lowerCAmelCase__ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
lowerCAmelCase__ : str = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
lowerCAmelCase__ : int = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
lowerCAmelCase__ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
lowerCAmelCase__ : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
lowerCAmelCase__ : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
lowerCAmelCase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_continuous_encoder ( weights , model ) -> Optional[int]:
lowerCAmelCase__ : Dict = nn.Parameter(torch.FloatTensor(weights["""input_proj"""]["""kernel"""].T ) )
lowerCAmelCase__ : List[Any] = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=__UpperCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
ly_weight = weights[f"""layers_{lyr_num}"""]
attention_weights = ly_weight["""attention"""]
lowerCAmelCase__ : int = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
lowerCAmelCase__ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
lowerCAmelCase__ : str = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
lowerCAmelCase__ : Dict = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
lowerCAmelCase__ : Any = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
lowerCAmelCase__ : Any = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
lowerCAmelCase__ : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
lowerCAmelCase__ : Any = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
lowerCAmelCase__ : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
lowerCAmelCase__ : Optional[int] = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_decoder ( weights , model ) -> str:
lowerCAmelCase__ : List[str] = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense0"""]["""kernel"""].T ) )
lowerCAmelCase__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense1"""]["""kernel"""].T ) )
lowerCAmelCase__ : List[str] = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=__UpperCAmelCase )
lowerCAmelCase__ : Dict = nn.Parameter(
torch.FloatTensor(weights["""continuous_inputs_projection"""]["""kernel"""].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
ly_weight = weights[f"""layers_{lyr_num}"""]
lowerCAmelCase__ : Tuple = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_self_attention_layer_norm"""]["""scale"""] ) )
lowerCAmelCase__ : Union[str, Any] = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_0"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
attention_weights = ly_weight["""self_attention"""]
lowerCAmelCase__ : str = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
lowerCAmelCase__ : Dict = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
lowerCAmelCase__ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
lowerCAmelCase__ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
attention_weights = ly_weight["""MultiHeadDotProductAttention_0"""]
lowerCAmelCase__ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
lowerCAmelCase__ : Dict = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
lowerCAmelCase__ : Any = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
lowerCAmelCase__ : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
lowerCAmelCase__ : Optional[int] = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_cross_attention_layer_norm"""]["""scale"""] ) )
lowerCAmelCase__ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
lowerCAmelCase__ : Dict = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_1"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
lowerCAmelCase__ : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
lowerCAmelCase__ : int = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
lowerCAmelCase__ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
lowerCAmelCase__ : str = nn.Parameter(torch.FloatTensor(weights["""decoder_norm"""]["""scale"""] ) )
lowerCAmelCase__ : List[str] = nn.Parameter(torch.FloatTensor(weights["""spec_out_dense"""]["""kernel"""].T ) )
return model
def main ( args ) -> str:
ta_checkpoint = checkpoints.load_tax_checkpoint(args.checkpoint_path )
ta_checkpoint = jnp.tree_util.tree_map(onp.array , ta_checkpoint )
gin_overrides = [
"""from __gin__ import dynamic_registration""",
"""from music_spectrogram_diffusion.models.diffusion import diffusion_utils""",
"""diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0""",
"""diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()""",
]
gin_file = os.path.join(args.checkpoint_path , """..""" , """config.gin""" )
gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )
scheduler = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" , variance_type="""fixed_large""" )
notes_encoder = SpectrogramNotesEncoder(
    max_length=synth_model.sequence_length["""inputs"""] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , )
continuous_encoder = SpectrogramContEncoder(
    input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["""targets_context"""] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , )
decoder = TaFilmDecoder(
    input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["""targets_context"""] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
notes_encoder = load_notes_encoder(ta_checkpoint["""target"""]["""token_encoder"""] , notes_encoder )
continuous_encoder = load_continuous_encoder(ta_checkpoint["""target"""]["""continuous_encoder"""] , continuous_encoder )
decoder = load_decoder(ta_checkpoint["""target"""]["""decoder"""] , decoder )
melgan = OnnxRuntimeModel.from_pretrained("""kashif/soundstream_mel_decoder""" )
pipe = SpectrogramDiffusionPipeline(
    notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=f"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
args = parser.parse_args()
main(args)
| 212
| 0
|
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('dataclasses')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('importlib_metadata')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def snake_case ( pkg , hint=None):
    require_version(deps[pkg] , hint)
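# Illustrative call (hint text is hypothetical): snake_case("tokenizers", "pip install -U tokenizers")
# re-checks the pin recorded in `deps` at call time and raises if the installed version violates it.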
| 180
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class a ( unittest.TestCase ):
"""simple docstring"""
def setUp ( self ) -> Union[str, Any]:
    vocab = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
    vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
    self.add_kwargs_tokens_map = {
        """unk_token""": """<unk>""",
        """bos_token""": """<s>""",
        """eos_token""": """</s>""",
    }
    feature_extractor_map = {
        """feature_size""": 1,
        """padding_value""": 0.0,
        """sampling_rate""": 1_60_00,
        """return_attention_mask""": False,
        """do_normalize""": True,
    }
    self.tmpdirname = tempfile.mkdtemp()
    self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
    self.feature_extraction_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
    with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
        fp.write(json.dumps(vocab_tokens ) + """\n""" )
    with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
        fp.write(json.dumps(feature_extractor_map ) + """\n""" )
    # load decoder from hub
    self.decoder_name = """hf-internal-testing/ngram-beam-search-decoder"""
def get_tokenizer ( self , **kwargs_init ) -> Tuple:
    kwargs = self.add_kwargs_tokens_map.copy()
    kwargs.update(kwargs_init )
    return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def get_feature_extractor ( self , **kwargs ) -> Any:
    return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **kwargs )
def get_decoder ( self , **kwargs ) -> Any:
    return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **kwargs )
def tearDown ( self ) -> List[str]:
    shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self ) -> Tuple:
_A = self.get_tokenizer()
_A = self.get_feature_extractor()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
_A = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , lowerCAmelCase_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Tuple:
_A = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_A = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(lowerCAmelCase_ , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=lowerCAmelCase_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def UpperCAmelCase ( self ) -> Any:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
_A = floats_list((3, 10_00) )
_A = feature_extractor(lowerCAmelCase_ , return_tensors="""np""" )
_A = processor(lowerCAmelCase_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase ( self ) -> Any:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
_A = """This is a test string"""
_A = processor(text=lowerCAmelCase_ )
_A = tokenizer(lowerCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _get_dummy_logits ( self , shape=(2, 10, 16) , seed=77 ) -> Tuple:
    np.random.seed(seed )
    return np.random.rand(*shape )
def UpperCAmelCase ( self ) -> Tuple:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
_A = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_A = processor.decode(lowerCAmelCase_ )
_A = decoder.decode_beams(lowerCAmelCase_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
_A = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_A = processor.batch_decode(lowerCAmelCase_ )
else:
with get_context(lowerCAmelCase_ ).Pool() as pool:
_A = processor.batch_decode(lowerCAmelCase_ , lowerCAmelCase_ )
_A = list(lowerCAmelCase_ )
with get_context("""fork""" ).Pool() as p:
_A = decoder.decode_beams_batch(lowerCAmelCase_ , lowerCAmelCase_ )
texts_decoder , logit_scores_decoder , lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(lowerCAmelCase_ , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(lowerCAmelCase_ , decoded_processor.logit_score )
self.assertListEqual(lowerCAmelCase_ , decoded_processor.lm_score )
def UpperCAmelCase ( self ) -> List[Any]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
_A = self._get_dummy_logits()
_A = 15
_A = -20.0
_A = -4.0
_A = processor.batch_decode(
lowerCAmelCase_ , beam_width=lowerCAmelCase_ , beam_prune_logp=lowerCAmelCase_ , token_min_logp=lowerCAmelCase_ , )
_A = decoded_processor_out.text
_A = list(lowerCAmelCase_ )
with get_context("""fork""" ).Pool() as pool:
_A = decoder.decode_beams_batch(
lowerCAmelCase_ , lowerCAmelCase_ , beam_width=lowerCAmelCase_ , beam_prune_logp=lowerCAmelCase_ , token_min_logp=lowerCAmelCase_ , )
_A = [d[0][0] for d in decoded_decoder_out]
_A = [d[0][2] for d in decoded_decoder_out]
_A = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , lowerCAmelCase_ )
self.assertTrue(np.array_equal(lowerCAmelCase_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , lowerCAmelCase_ , atol=1E-3 ) )
self.assertTrue(np.array_equal(lowerCAmelCase_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , lowerCAmelCase_ , atol=1E-3 ) )
def UpperCAmelCase ( self ) -> List[Any]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
_A = self._get_dummy_logits()
_A = 2.0
_A = 5.0
_A = -20.0
_A = True
_A = processor.batch_decode(
lowerCAmelCase_ , alpha=lowerCAmelCase_ , beta=lowerCAmelCase_ , unk_score_offset=lowerCAmelCase_ , lm_score_boundary=lowerCAmelCase_ , )
_A = decoded_processor_out.text
_A = list(lowerCAmelCase_ )
decoder.reset_params(
alpha=lowerCAmelCase_ , beta=lowerCAmelCase_ , unk_score_offset=lowerCAmelCase_ , lm_score_boundary=lowerCAmelCase_ , )
with get_context("""fork""" ).Pool() as pool:
_A = decoder.decode_beams_batch(
lowerCAmelCase_ , lowerCAmelCase_ , )
_A = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , lowerCAmelCase_ )
_A = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_A = processor.decoder.model_container[processor.decoder._model_key]
_A = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
_A = os.listdir(lowerCAmelCase_ )
_A = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> List[str]:
_A = snapshot_download("""hf-internal-testing/processor_with_lm""" )
_A = WavaVecaProcessorWithLM.from_pretrained(lowerCAmelCase_ )
_A = processor.decoder.model_container[processor.decoder._model_key]
_A = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
_A = os.listdir(lowerCAmelCase_ )
_A = os.listdir(lowerCAmelCase_ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> List[str]:
_A = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_A = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_A = floats_list((3, 10_00) )
_A = processor_wavaveca(lowerCAmelCase_ , return_tensors="""np""" )
_A = processor_auto(lowerCAmelCase_ , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
_A = self._get_dummy_logits()
_A = processor_wavaveca.batch_decode(lowerCAmelCase_ )
_A = processor_auto.batch_decode(lowerCAmelCase_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def get_from_offsets ( offsets , key ) -> int:
    retrieved_list = [d[key] for d in offsets]
    return retrieved_list
def UpperCAmelCase ( self ) -> Any:
_A = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_A = self._get_dummy_logits()[0]
_A = processor.decode(lowerCAmelCase_ , output_word_offsets=lowerCAmelCase_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def UpperCAmelCase ( self ) -> Any:
_A = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_A = self._get_dummy_logits()
_A = processor.batch_decode(lowerCAmelCase_ , output_word_offsets=lowerCAmelCase_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(lowerCAmelCase_ , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def test_word_time_stamp_integration ( self ) -> Any:
    import torch
    ds = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=True )
    ds = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_60_00 ) )
    ds_iter = iter(ds )
    sample = next(ds_iter )
    processor = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
    model = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
    # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
    input_values = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
    with torch.no_grad():
        logits = model(input_values ).logits.cpu().numpy()
    output = processor.decode(logits[0] , output_word_offsets=True )
    time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
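    # Each CTC logit frame covers `inputs_to_logits_ratio` raw audio samples; dividing by the
    # sampling rate converts frame offsets to seconds (for wav2vec2-base that is
    # 320 / 16000 = 0.02 s per frame).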
    word_time_stamps = [
        {
            """start_time""": d["""start_offset"""] * time_offset,
            """end_time""": d["""end_offset"""] * time_offset,
            """word""": d["""word"""],
        }
        for d in output["""word_offsets"""]
    ]
    EXPECTED_TEXT = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
    # output words
    self.assertEqual(""" """.join(self.get_from_offsets(word_time_stamps , """word""" ) ) , EXPECTED_TEXT )
    self.assertEqual(""" """.join(self.get_from_offsets(word_time_stamps , """word""" ) ) , output.text )
    # output times
    start_times = torch.tensor(self.get_from_offsets(word_time_stamps , """start_time""" ) )
    end_times = torch.tensor(self.get_from_offsets(word_time_stamps , """end_time""" ) )
    # fmt: off
    expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
    expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
    # fmt: on
    self.assertTrue(torch.allclose(start_times , expected_start_tensor , atol=0.01 ) )
    self.assertTrue(torch.allclose(end_times , expected_end_tensor , atol=0.01 ) )
| 180
| 1
|
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __UpperCAmelCase ( A__ , A__ , A__ , unittest.TestCase ):
'''simple docstring'''
pipeline_class = StableUnCLIPPipeline
params = TEXT_TO_IMAGE_PARAMS
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
test_xformers_attention = False
def get_dummy_components (self : Optional[Any] ):
    embedder_hidden_size = 32
    embedder_projection_dim = embedder_hidden_size
    # prior components
    torch.manual_seed(0 )
    prior_tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
    torch.manual_seed(0 )
    prior_text_encoder = CLIPTextModelWithProjection(
        CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=embedder_projection_dim , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
    torch.manual_seed(0 )
    prior = PriorTransformer(
        num_attention_heads=2 , attention_head_dim=12 , embedding_dim=embedder_projection_dim , num_layers=1 , )
    torch.manual_seed(0 )
    prior_scheduler = DDPMScheduler(
        variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=True , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , )
    # regular denoising components
    torch.manual_seed(0 )
    image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size )
    image_noising_scheduler = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
    torch.manual_seed(0 )
    tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
    torch.manual_seed(0 )
    text_encoder = CLIPTextModel(
        CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
    torch.manual_seed(0 )
    unet = UNetaDConditionModel(
        sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=embedder_hidden_size , layers_per_block=1 , upcast_attention=True , use_linear_projection=True , )
    torch.manual_seed(0 )
    scheduler = DDIMScheduler(
        beta_schedule="""scaled_linear""" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=False , steps_offset=1 , )
    torch.manual_seed(0 )
    vae = AutoencoderKL()
    components = {
# prior components
"""prior_tokenizer""": prior_tokenizer,
"""prior_text_encoder""": prior_text_encoder,
"""prior""": prior,
"""prior_scheduler""": prior_scheduler,
# image noising components
"""image_normalizer""": image_normalizer,
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder,
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
}
return components
def get_dummy_inputs (self : Union[str, Any] , device : Any , seed : int=0 ):
    if str(device ).startswith("""mps""" ):
        generator = torch.manual_seed(seed )
    else:
        generator = torch.Generator(device=device ).manual_seed(seed )
    inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""prior_num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def test_attention_slicing_forward_pass (self : List[str] ):
    test_max_difference = torch_device == """cpu"""
    self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference )
def test_inference_batch_single_identical (self : List[Any] ):
    test_max_difference = torch_device in ["""cpu""", """mps"""]
    self._test_inference_batch_single_identical(test_max_difference=test_max_difference )
@slow
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def tearDown (self : Dict ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_stable_unclip (self : Tuple ):
    expected_image = load_numpy(
        """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
    pipe = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
    pipe.to(torch_device )
    pipe.set_progress_bar_config(disable=None )
    # stable unclip will oom when integration tests are run on a V100,
    # so turn on memory savings
    pipe.enable_attention_slicing()
    pipe.enable_sequential_cpu_offload()
    generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
    output = pipe("""anime turtle""" , generator=generator , output_type="""np""" )
    image = output.images[0]
    assert image.shape == (768, 768, 3)
    assert_mean_pixel_difference(image , expected_image )
def test_stable_unclip_pipeline_with_sequential_cpu_offloading (self : Dict ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
    pipe = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
    pipe = pipe.to(torch_device )
    pipe.set_progress_bar_config(disable=None )
    pipe.enable_attention_slicing()
    pipe.enable_sequential_cpu_offload()
    _ = pipe(
        """anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , )
    mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 337
|
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = 'src/diffusers'
REPO_PATH = '.'
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    'diffusers',
    os.path.join(DIFFUSERS_PATH, '__init__.py'),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue ( line , indent ) ->Union[str, Any]:
    """simple docstring"""
    return line.startswith(indent ) or len(line ) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""" , line ) is not None
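# A line "continues" the current block if it is still indented at least as deep as the block's
# indent, is (near-)empty, or is a bare closing ")" / ") -> Type:" line of a multi-line signature.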
def find_code_in_diffusers ( object_name ) ->Dict:
    """simple docstring"""
    parts = object_name.split(""".""" )
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts ) and not os.path.isfile(os.path.join(DIFFUSERS_PATH , f"""{module}.py""" ) ):
        i += 1
        if i < len(parts ):
            module = os.path.join(module , parts[i] )
    if i >= len(parts ):
        raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
    with open(os.path.join(DIFFUSERS_PATH , f"""{module}.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = """"""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines ):
        raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines ) and _should_continue(lines[line_index] , indent ):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1] ) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return """""".join(code_lines )
_re_copy_warning = re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
_re_replace_pattern = re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
_re_fill_pattern = re.compile(R'<FILL\s+[^>]*>')
def get_indent ( code ) ->str:
    """simple docstring"""
    lines = code.split("""\n""" )
    idx = 0
    while idx < len(lines ) and len(lines[idx] ) == 0:
        idx += 1
    if idx < len(lines ):
        return re.search(R"""^(\s*)\S""" , lines[idx] ).groups()[0]
    return ""
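# e.g. get_indent("    def f():\n        pass") returns "    " -- the leading whitespace of the
# first non-empty line.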
def blackify ( code ) ->Optional[int]:
    """simple docstring"""
    has_indent = len(get_indent(code ) ) > 0
    if has_indent:
        code = f"""class Bla:\n{code}"""
    mode = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=True )
    result = black.format_str(code , mode=mode )
    result , _ = style_docstrings_in_code(result )
    return result[len("""class Bla:\n""" ) :] if has_indent else result
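# Wrapping indented snippets in a dummy `class Bla:` lets black parse method-level code that
# would be a syntax error at module scope; the wrapper prefix is sliced off again on return.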
def is_copy_consistent(filename, overwrite=False):
    """Checks the copies in one file and optionally fixes them."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)
        # Blackify after replacement. To be able to do that, we need the header (class or function definition)
        # from the previous line
        theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
        theoretical_code = theoretical_code[len(lines[start_index - 1]) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
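# Minimal, self-contained sketch (illustrative inputs only, not part of the original
# script) of the `ObjA->ObjB` replace-pattern syntax the checker handles above, so
# the two regexes can be exercised in isolation:
def _demo_replace_pattern():
    comment = "# Copied from diffusers.models.unet.UNetBlock with UNet->VNet all-casing"
    code = "class UNetBlock:\n    name = 'unet'\n"
    _, object_name, replace_pattern = _re_copy_warning.search(comment).groups()
    for p in replace_pattern.replace("with", "").split(","):
        sub = _re_replace_pattern.search(p)
        if sub is None:
            continue
        obj1, obj2, option = sub.groups()
        code = re.sub(obj1, obj2, code)
        if option.strip() == "all-casing":
            # also swap the lowercase and uppercase spellings of the two names
            code = re.sub(obj1.lower(), obj2.lower(), code)
            code = re.sub(obj1.upper(), obj2.upper(), code)
    return code  # "class VNetBlock:\n    name = 'vnet'\n"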
def check_copies(overwrite: bool = False):
    """Checks all copies in the repo are consistent, optionally fixing them."""
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
    check_copies(args.fix_and_overwrite)
| 337
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_nllb_moe''': [
'''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''NllbMoeConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
'''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NllbMoeForConditionalGeneration''',
'''NllbMoeModel''',
'''NllbMoePreTrainedModel''',
'''NllbMoeTop2Router''',
'''NllbMoeSparseMLP''',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
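# A toy sketch of the lazy-import pattern wired up above (a simplified stand-in, not
# the real `_LazyModule` implementation): the module object resolves exported names
# from `_import_structure` on first attribute access, deferring heavy submodule
# imports until they are actually needed.
import importlib
import types

class _ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name back to the submodule that defines it
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # the submodule is imported lazily, only when the attribute is first requested
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(module, attr)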
| 104
|
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/led-base-16384': 1_6_3_8_4,
}
class a__( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'] )
            if "cls" in state:
                state['cls'] = tuple(state['cls'] )
            changes_to_apply = False
            if state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets' , trim_offsets ) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('type' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
    def mask_token( self , value ):
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                'to use it with pretokenized inputs.' )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                'to use it with pretokenized inputs.' )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _pad( self , encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding] , max_length: Optional[int] = None , padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = 'attention_mask' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['global_attention_mask'] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs['global_attention_mask'] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['global_attention_mask'] = (
                        encoded_inputs['global_attention_mask'] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
                        'global_attention_mask'
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
        return encoded_inputs
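# A small illustrative check (standalone helper with made-up values, not part of the
# tokenizer) of the `-1` padding convention used in `_pad` above: in
# `global_attention_mask`, 0 already means "local attention", so padded positions
# must be marked with -1 rather than 0.
def _demo_global_attention_padding():
    global_attention_mask = [1, 0, 0]        # token 0 attends globally
    padded_input_ids = [101, 7, 8, 0, 0]     # two pad tokens appended on the right
    difference = len(padded_input_ids) - len(global_attention_mask)
    padded_mask = global_attention_mask + [-1] * difference
    assert padded_mask == [1, 0, 0, -1, -1]
    return padded_mask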
| 297
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase_ : int = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _a ( BaseImageProcessor ):
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PILImageResampling.BICUBIC , do_center_crop: bool = True , crop_size: Dict[str, int] = None , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , do_convert_rgb: bool = True , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BICUBIC , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
        output_size = get_resize_output_image_size(image , size=size["shortest_edge"] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale( self , image: np.ndarray , scale: Union[int, float] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_center_crop: bool = None , crop_size: int = None , do_rescale: bool = None , rescale_factor: float = None , do_normalize: bool = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , do_convert_rgb: bool = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: Optional[ChannelDimension] = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name="size" , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
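# A compact numpy-only sketch of the transform order used by `preprocess` above
# (resize -> center crop -> rescale -> normalize -> channels-first). Dummy data and
# a crude crop stand in for the real resizing; illustrative only, not the processor.
def _demo_transform_order():
    rng = np.random.default_rng(0)
    image = rng.integers(0, 256, size=(256, 256, 3)).astype(np.float32)
    image = image[16:240, 16:240, :]          # stand-in for resize + center crop to 224
    image = image * (1 / 255)                 # rescale to [0, 1]
    image = (image - np.array(OPENAI_CLIP_MEAN)) / np.array(OPENAI_CLIP_STD)  # normalize
    return image.transpose(2, 0, 1)           # HWC -> CHW (ChannelDimension.FIRST)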
| 142
|
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup( params , i , prefix ):
    """Returns the relative position bias parameters of a layer. Does not transpose."""
    return params[F"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def tax_attention_lookup( params , i , prefix , layer_name="attention" ):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
    k = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
    o_tmp = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
    q_tmp = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
    q = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
    v_tmp = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
    v = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
    return k, o, q, v
def tax_mlp_lookup( params , i , prefix , split_mlp_wi=False ):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_a = params[F"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
        wi_b = params[F"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
        wi = (wi_a, wi_b)
    else:
        wi = params[F"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
    wo = params[F"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
    return wi, wo
def tax_layer_norm_lookup( params , i , prefix , layer_name ):
    """Returns the layer norm parameters of a layer."""
    return params[F"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def convert_tax_to_pytorch( variables: dict , *, num_layers: int , is_encoder_only: bool , scalable_attention: bool = False ):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"] )
    old = {"/".join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:" , split_mlp_wi )
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers ):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old , i , "encoder" , "pre_attention_layer_norm" )
        k , o , q , v = tax_attention_lookup(old , i , "encoder" , "attention" )
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old , i , "encoder" , "pre_mlp_layer_norm" )
        wi , wo = tax_mlp_lookup(old , i , "encoder" , split_mlp_wi )
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                old , i , "encoder" ).T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old , 0 , "encoder" ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old , 0 , "decoder" ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers ):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old , i , "decoder" , "pre_self_attention_layer_norm" )
            k , o , q , v = tax_attention_lookup(old , i , "decoder" , "self_attention" )
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old , i , "decoder" , "pre_cross_attention_layer_norm" )
            k , o , q , v = tax_attention_lookup(old , i , "decoder" , "encoder_decoder_attention" )
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old , i , "decoder" , "pre_mlp_layer_norm" )
            wi , wo = tax_mlp_lookup(old , i , "decoder" , split_mlp_wi )
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(old , i , "decoder" ).T
        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
    return new
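# The conversion above leans on one convention worth spelling out: Flax/T5X stores
# dense kernels as (in_features, out_features) while torch.nn.Linear stores
# (out_features, in_features), hence all the `.T` transposes. A tiny standalone
# check of that equivalence (toy shapes, not part of the conversion script):
def _demo_kernel_transpose():
    jax_kernel = np.arange(6, dtype=np.float32).reshape(2, 3)  # (d_in=2, d_out=3)
    torch_weight = torch.from_numpy(np.ascontiguousarray(jax_kernel.T))
    x = torch.ones(1, 2)
    # y = x @ kernel in Flax equals torch.nn.functional.linear(x, weight=kernel.T)
    assert torch.allclose(x @ torch.from_numpy(jax_kernel),
                          torch.nn.functional.linear(x, torch_weight))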
def make_state_dict( converted_params , is_encoder_only: bool ):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head." )
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_tax_weights_in_ta( model , config , tax_checkpoint_path , is_encoder_only , scalable_attention ):
    """Replaces the model's parameters with the converted T5X checkpoint."""
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(
        variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only , scalable_attention=scalable_attention )
    state_dict = make_state_dict(converted , is_encoder_only )
    model.load_state_dict(state_dict , strict=True )
def convert_tax_checkpoint_to_pytorch( tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only: bool = False , scalable_attention: bool = False , ):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file )
    print(F"""Building PyTorch model from configuration: {config}""" )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config )
    else:
        model = UMT5ForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only , scalable_attention )
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print("Done" )
if __name__ == "__main__":
UpperCamelCase_ : Any = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
UpperCamelCase_ : Union[str, Any] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 142
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : List[Any] = logging.get_logger(__name__)
lowerCamelCase : int = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class __lowerCAmelCase (PretrainedConfig ):
    '''simple docstring'''
    model_type = """ctrl"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__(self , vocab_size=246534 , n_positions=256 , n_embd=1280 , dff=8192 , n_layer=48 , n_head=16 , resid_pdrop=0.1 , embd_pdrop=0.1 , layer_norm_epsilon=1E-6 , initializer_range=0.02 , use_cache=True , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs )
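# A short sketch of what `attribute_map` buys: canonical names such as
# `hidden_size` are aliased onto the CTRL-specific ones (`n_embd`, `n_layer`, ...)
# so generic code can read `config.hidden_size`. This toy class is a simplified
# stand-in, not the real PretrainedConfig machinery.
class _ToyConfig:
    attribute_map = {"hidden_size": "n_embd", "num_hidden_layers": "n_layer"}

    def __init__(self, n_embd=1280, n_layer=48):
        self.n_embd = n_embd
        self.n_layer = n_layer

    def __getattr__(self, name):
        # only called for names not found normally; route aliases through the map
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)

# _ToyConfig().hidden_size == 1280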
| 2
|
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
TEST_FLUSH = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
TEST_STRAIGHT = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
TEST_TYPES = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def generate_random_hand():
    """Generates a random poker hand, a random opponent hand, and the expected outcome."""
    play , oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    expected = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
    hand , other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(number_of_hands: int = 100 ):
    """Generates a stream of random hand/opponent/expected triples."""
    return (generate_random_hand() for _ in range(number_of_hands ))
@pytest.mark.parametrize('hand, expected' , TEST_FLUSH )
def test_hand_is_flush(hand , expected ):
    """simple docstring"""
    assert PokerHand(hand )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , TEST_STRAIGHT )
def test_hand_is_straight(hand , expected ):
    """simple docstring"""
    assert PokerHand(hand )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , TEST_FIVE_HIGH_STRAIGHT )
def test_hand_is_five_high_straight(hand , expected , card_values ):
    """simple docstring"""
    player = PokerHand(hand )
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , TEST_KIND )
def test_hand_is_same_kind(hand , expected ):
    """simple docstring"""
    assert PokerHand(hand )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , TEST_TYPES )
def test_hand_values(hand , expected ):
    """simple docstring"""
    assert PokerHand(hand )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , TEST_COMPARE )
def test_compare_simple(hand , other , expected ):
    """simple docstring"""
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def test_compare_random(hand , other , expected ):
    """simple docstring"""
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
def test_hand_sorted():
    """simple docstring"""
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    """simple docstring"""
    pokerhands = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
    pokerhands.sort(reverse=True )
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    """simple docstring"""
    pokerhand = PokerHand('2C 4S AS 3D 5C' )
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10 ):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    """simple docstring"""
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    poker_hands_path = os.path.join(script_dir , 'poker_hands.txt' )
    with open(poker_hands_path ) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player , opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
            if output == "Win":
                answer += 1
    assert answer == 376
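# Quick illustrative check (a standalone helper, not part of the original tests) of
# the outcome encoding used in `generate_random_hand` above: because SORTED_HANDS is
# ordered weakest-to-strongest, the sum `(play >= oppo) + (play > oppo)` is 0, 1 or
# 2, indexing straight into ['Loss', 'Tie', 'Win'].
def _demo_expected_outcome():
    for play, oppo, expected in [(0, 5, 'Loss'), (3, 3, 'Tie'), (5, 0, 'Win')]:
        assert ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)] == expected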
| 2
| 1
|
"""simple docstring"""
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
def get_maskformer_config(model_name: str ):
    backbone_config = SwinConfig.from_pretrained(
        'microsoft/swin-tiny-patch4-window7-224' , out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
    config = MaskFormerConfig(backbone_config=backbone_config )
    repo_id = 'huggingface/label-files'
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = 'maskformer-ade20k-full-id2label.json'
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = 'ade20k-id2label.json'
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = 'maskformer-coco-stuff-id2label.json'
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = 'coco-panoptic-id2label.json'
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = 'cityscapes-id2label.json'
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = 'mapillary-vistas-id2label.json'
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    return config
def create_rename_keys(config ):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') )
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') )
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') )
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') )
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') )
for i in range(3 ):
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v(state_dict , backbone_config ):
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight" )
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias" )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim :, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim :]
            # fmt: on
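# A minimal sketch of the fused-QKV split performed above: the original checkpoint
# stores one (3*dim, dim) projection matrix, while the HF model wants separate
# query/key/value slices. Toy sizes, illustrative only:
def _demo_qkv_split():
    dim = 4
    in_proj_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q = in_proj_weight[:dim, :]
    k = in_proj_weight[dim : dim * 2, :]
    v = in_proj_weight[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)
    # the three slices tile the fused matrix exactly, in q/k/v order
    assert torch.equal(torch.cat([q, k, v]), in_proj_weight)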
def read_in_decoder_q_k_v(state_dict , config ):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[: hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size :, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[: hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size :, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size :]
    # fmt: on
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str , checkpoint_path: str , pytorch_dump_folder_path: str , push_to_hub: bool = False ):
    config = get_maskformer_config(model_name )
    # load original state_dict
    with open(checkpoint_path , 'rb' ) as f:
        data = pickle.load(f )
    state_dict = data['model']
    # for name, param in state_dict.items():
    # print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value )
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config )
    model.eval()
    for name, param in model.named_parameters():
        print(name , param.shape )
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys ) == 0, f"Unexpected keys: {unexpected_keys}"
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if 'ade' in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index , reduce_labels=reduce_labels )
    inputs = image_processor(image , return_tensors='pt' )
    outputs = model(**inputs )
    print('Logits:' , outputs.class_queries_logits[0, :3, :3] )
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_logits , atol=1E-4 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}" )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print('Pushing model and image processor to the hub...' )
        model.push_to_hub(f"nielsr/{model_name}" )
        image_processor.push_to_hub(f"nielsr/{model_name}" )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="maskformer-swin-tiny-ade",
type=str,
help=("Name of the MaskFormer model you\'d like to convert",),
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 367
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase ( unittest.TestCase ):
    def test_input_types( self ):
        '''simple docstring'''
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset )
        self.assertTrue(isinstance(dc.token_ids , list ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
    def test_check_illegal_input( self ):
        '''simple docstring'''
        # one candidate is a complete subset of another, which is not allowed
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(cset ) # fails here
    def test_example_progression( self ):
        '''simple docstring'''
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset )
        stepped , completed , reset = dc.update(1 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(3 )
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )
    def test_example_progression_unequal_three_mid_and_reset( self ):
        '''simple docstring'''
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset )
        stepped , completed , reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        stepped , completed , reset = dc.update(5 )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        dc.reset()
        stepped , completed , reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(5 )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
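# Illustrative driver (the helper name `_demo_drive_constraint` is made up, not part
# of the test suite): a DisjunctiveConstraint is fulfilled as soon as any one of its
# candidate sequences has been generated, so a caller only needs the `completed`
# flag returned by update().
def _demo_drive_constraint(tokens):
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]])
    for token in tokens:
        stepped, completed, reset = dc.update(token)
        if completed:
            return dc.current_seq
    return None
# e.g. _demo_drive_constraint([1, 2, 4, 5]) returns [1, 2, 4, 5]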
| 321
| 0
|
'''simple docstring'''
import math
class Graph:
    '''simple docstring'''
    def __init__( self , n=0 ):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # dp[i][j] stores minimum distance from i to j
    def add_edge( self , u , v , w ):
        self.dp[u][v] = w
    def floyd_warshall( self ):
        for k in range(0 , self.n ):
            for i in range(0 , self.n ):
                for j in range(0 , self.n ):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
    def show_min( self , u , v ):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
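    # Hand-checked expectations for the two queries above (illustrative asserts):
    # the best 1 -> 4 route is 1 -> 3 -> 4 (5 + 6 = 11) and the best 0 -> 3 route
    # is 0 -> 2 -> 3 (9 + 7 = 16).
    assert graph.show_min(1, 4) == 11
    assert graph.show_min(0, 3) == 16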
| 37
|
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Decode an audio byte payload to a float32 numpy array with ffmpeg."""
    ar = f'{sampling_rate}'
    ac = '1'
    format_for_conversion = 'f32le'
    ffmpeg_command = [
        'ffmpeg',
        '-i',
        'pipe:0',
        '-ac',
        ac,
        '-ar',
        ar,
        '-f',
        format_for_conversion,
        '-hide_banner',
        '-loglevel',
        'quiet',
        'pipe:1',
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to load audio files from filename') from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError('Malformed soundfile')
    return audio
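# Usage sketch (assumes a local audio file; any container/codec ffmpeg
# understands works, since ffmpeg does the decoding):
#     with open('sample.mp3', 'rb') as f:
#         audio = ffmpeg_read(f.read(), sampling_rate=16_000)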
def ffmpeg_microphone(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = 'f32le'):
    """Stream raw microphone bytes from ffmpeg in chunks of `chunk_length_s` seconds."""
    ar = f'{sampling_rate}'
    ac = '1'
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`')
    system = platform.system()
    if system == "Linux":
        format_ = 'alsa'
        input_ = 'default'
    elif system == "Darwin":
        format_ = 'avfoundation'
        input_ = ':0'
    elif system == "Windows":
        format_ = 'dshow'
        input_ = 'default'
    ffmpeg_command = [
        'ffmpeg',
        '-f',
        format_,
        '-i',
        input_,
        '-ac',
        ac,
        '-ar',
        ar,
        '-f',
        format_for_conversion,
        '-fflags',
        'nobuffer',
        '-hide_banner',
        '-loglevel',
        'quiet',
        'pipe:1',
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(sampling_rate: int, chunk_length_s: float, stream_chunk_s: Optional[int] = None, stride_length_s: Optional[Union[Tuple[float, float], float]] = None, format_for_conversion: str = 'f32le', ):
    """Yield overlapping microphone chunks with stride metadata, suitable for streaming inference."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`')
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item['raw'] = np.frombuffer(item['raw'], dtype=dtype)
        item['stride'] = (
            item['stride'][0] // size_of_sample,
            item['stride'][1] // size_of_sample,
        )
        item['sampling_rate'] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Re-chunk a byte iterator into fixed-size chunks that overlap by `stride` bytes."""
    acc = b''
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f'Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}')
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {'raw': acc[:chunk_len], 'stride': stride, 'partial': True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {'raw': acc[:chunk_len], 'stride': stride}
                if stream:
                    item['partial'] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {'raw': acc, 'stride': (_stride_left, 0)}
        if stream:
            item['partial'] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal: spawn ffmpeg and yield its stdout in `buflen`-byte reads."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to stream audio files from filename') from error
| 212
| 0
|
'''simple docstring'''
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)
BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class BertAbsConfig(PretrainedConfig):
    """Configuration for the BertAbs extractive/abstractive summarization model."""
    model_type = "bertabs"
    def __init__( self, vocab_size=30_522, max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2_048, dec_dropout=0.2, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
|
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices (the recursion base case)."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def matrix_subtraction(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split a square even-sized matrix into its four quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])
def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursively compute matrix_a @ matrix_b using Strassen's seven products."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)
    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f'Matrix A: {matrix1}\n'
            f'Matrix B: {matrix2}'
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)
    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]
    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)
    final_matrix = actual_strassen(new_matrix1, new_matrix2)
    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
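    # Strassen replaces the 8 recursive block multiplications of the naive
    # divide-and-conquer scheme with 7, giving O(n^log2(7)) ~ O(n^2.81)
    # arithmetic operations instead of O(n^3).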
| 229
| 1
|
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ))
        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1, )
        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type='fixed_small_log', prediction_type='sample', num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule='squaredcos_cap_v2', )
        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2')
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ))
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D'), up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D'), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='projection', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule='scaled_linear', beta_start=0.00085, beta_end=0.012, prediction_type='v_prediction', set_alpha_to_one=False, steps_offset=1, )
        torch.manual_seed(0)
        vae = AutoencoderKL()
        components = {
            # prior components
            'prior_tokenizer': prior_tokenizer,
            'prior_text_encoder': prior_text_encoder,
            'prior': prior,
            'prior_scheduler': prior_scheduler,
            # image noising components
            'image_normalizer': image_normalizer,
            'image_noising_scheduler': image_noising_scheduler,
            # regular denoising components
            'tokenizer': tokenizer,
            'text_encoder': text_encoder,
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'prior_num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == 'cpu'
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy')
        pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe('anime turle', generator=generator, output_type='np')
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l', torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            'anime turtle', prior_num_inference_steps=2, num_inference_steps=2, output_type='np', )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 337
|
from __future__ import annotations
solution = []
def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if no already-placed queen attacks square (row, column)."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True
def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row, backtracking whenever no column is safe."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0  # undo the placement and try the next column
    return False
def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('Q', end=' ')
            else:
                print('.', end=' ')
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
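# For n = 8 the backtracking search prints all 92 distinct solutions.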
| 337
| 1
|
'''simple docstring'''
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    """Sort `collection[:n]` in place using recursive insertion sort."""
    if len(collection) <= 1 or n <= 1:
        return
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)
def insert_next(collection: list, index: int):
    """Bubble the element at `index` forward until the prefix is ordered."""
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)
if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list: list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
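    # Note: worst-case cost is O(n^2) comparisons, and the recursion depth grows
    # linearly with the input size, so very long lists can exceed Python's
    # default recursion limit.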
| 92
|
'''simple docstring'''
def solution(n: int = 1_000) -> int:
    """Return the sum of all multiples of 3 or 5 strictly below n."""
    result = 0
    a = 3
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            # multiples of 15 land in this branch exactly once, so no
            # inclusion-exclusion correction is needed
            result += a
        a += 1
    return result
if __name__ == "__main__":
print(F"{solution() = }")
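# Known Project Euler #1 result: solution() == 233168 for the default n = 1_000.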
| 92
| 1
|
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec('s3fs') is not None
if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. "s3://") from a dataset path."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split('://')[1]
    return dataset_path
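# e.g. extract_path_from_uri("s3://my-bucket/data") -> "my-bucket/data"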
def is_remote_filesystem(fs) -> bool:
    """A filesystem is remote unless its protocol is the local "file" protocol."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename(fs, src: str, dst: str) -> None:
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
    """Clear fsspec's event-loop references so forked processes start clean."""
    if hasattr(fsspec.asyn, 'reset_lock'):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
| 142
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")
    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
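# Usage sketch (checkpoint name is illustrative; any hosted BridgeTower
# checkpoint with both a tokenizer and an image processor should work):
#     processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#     encoding = processor(images=image, text="a photo of a cat", return_tensors="pt")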
| 142
| 1
|
"""simple docstring"""
import requests
_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f'{i}.) {article["title"]}')
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 350
|
"""simple docstring"""
from collections.abc import Callable
class Heap:
    """Generic max-heap with O(log n) insert/update/delete via an item->index map."""
    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)
    def _parent(self, i: int) -> int | None:
        return int((i - 1) / 2) if i > 0 else None
    def _left(self, i: int) -> int | None:
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None
    def _right(self, i: int) -> int | None:
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None
    def _swap(self, i: int, j: int) -> None:
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]
    def _cmp(self, i: int, j: int) -> bool:
        return self.arr[i][1] < self.arr[j][1]
    def _get_valid_parent(self, i: int) -> int:
        left = self._left(i)
        right = self._right(i)
        valid_parent = i
        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right
        return valid_parent
    def _heapify_up(self, index: int) -> None:
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)
    def _heapify_down(self, index: int) -> None:
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)
    def update_item(self, item: int, item_value: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)
    def delete_item(self, item: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)
    def insert_item(self, item: int, item_value: int) -> None:
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)
    def get_top(self):
        return self.arr[0] if self.size else None
    def extract_top(self):
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
def __snake_case ( ) -> None:
'''simple docstring'''
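    # Illustrative usage sketch (not part of the original file): exercise the
    # max-heap with a few (item, value) pairs.
    h = Heap()
    h.insert_item(5, 34)
    h.insert_item(6, 31)
    h.insert_item(7, 37)
    print(h.get_top())  # [7, 37]
    print(h.extract_top())  # [7, 37]
    print(h.get_top())  # [5, 34]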
if __name__ == "__main__":
import doctest
doctest.testmod()
| 202
| 0
|
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    """Detect objects matching free-text candidate labels in an image."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f'The {self.__class__} is only available in PyTorch.')
        requires_backends(self, 'vision')
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)
    def __call__(self, image, candidate_labels=None, **kwargs, ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop('text_queries')
        if isinstance(image, (str, Image.Image)):
            inputs = {'image': image, 'candidate_labels': candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params['threshold'] = kwargs['threshold']
        if "top_k" in kwargs:
            postprocess_params['top_k'] = kwargs['top_k']
        return {}, {}, postprocess_params
    def preprocess(self, inputs):
        image = load_image(inputs['image'])
        candidate_labels = inputs['candidate_labels']
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(',')
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward(self, model_inputs):
        target_size = model_inputs.pop('target_size')
        candidate_label = model_inputs.pop('candidate_label')
        is_last = model_inputs.pop('is_last')
        outputs = self.model(**model_inputs)
        model_outputs = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output['candidate_label']
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output['target_size'])[0]
            for index in outputs["scores"].nonzero():
                score = outputs['scores'][index].item()
                box = self._get_bounding_box(outputs['boxes'][index][0])
                result = {'score': score, 'label': label, 'box': box}
                results.append(result)
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results
    def _get_bounding_box(self, box) -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.')
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            'xmin': xmin,
            'ymin': ymin,
            'xmax': xmax,
            'ymax': ymax,
        }
        return bbox
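# Usage sketch (checkpoint name is illustrative; OWL-ViT is a common choice
# for this task):
#     from transformers import pipeline
#     detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#     detector("http://images.cocodataset.org/val2017/000000039769.jpg",
#              candidate_labels=["cat", "remote control"])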
| 97
|
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Given two of resistance, reactance and impedance (the missing one passed
    as 0), solve for the missing quantity using Z^2 = R^2 + X^2."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
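# Worked example: electrical_impedance(3, 4, 0) == {"impedance": 5.0}
# (the 3-4-5 right triangle, since Z = sqrt(3**2 + 4**2)).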
if __name__ == "__main__":
import doctest
doctest.testmod()
| 321
| 0
|
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Recursively check both ends of the current window for `key`, shrinking
    the window by one element from each side per call; returns -1 if absent."""
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
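# Example: search([0, 1, 2, 3, 4], 3) returns 3; search([0, 1, 2], 9) returns -1.
# Unlike binary search, this two-ended scan needs no sorted input and is O(n).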
if __name__ == "__main__":
import doctest
doctest.testmod()
| 350
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp: int) -> bool:
    """Checks whether `cp` is the codepoint of a CJK character."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x2_0000 and cp <= 0x2_A6DF)  #
        or (cp >= 0x2_A700 and cp <= 0x2_B73F)  #
        or (cp >= 0x2_B740 and cp <= 0x2_B81F)  #
        or (cp >= 0x2_B820 and cp <= 0x2_CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2_F800 and cp <= 0x2_FA1F)  #
    ):  #
        return True
    return False
def is_chinese(word: str) -> int:
    for char in word:
        char_code = ord(char)
        if not _is_chinese_char(char_code):
            return 0
    return 1
def get_chinese_word(tokens: List[str]) -> List[str]:
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set) -> List[str]:
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer, bert_tokenizer) -> List[List[int]]:
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
    args = parser.parse_args()
main(args)
| 267
| 0
|
'''simple docstring'''
from __future__ import annotations
RADIX = 10
def radix_sort(list_of_ints: list[int]) -> list[int]:
    """LSD radix sort for non-negative integers, one base-10 digit per pass."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
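# Complexity sketch: O(d * (n + RADIX)) for n keys with at most d decimal digits;
# the per-digit bucketing is stable, which is what makes successive passes sound.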
if __name__ == "__main__":
import doctest
doctest.testmod()
| 229
|
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    """Abstract base class that every transformers-cli subcommand implements."""
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()
    @abstractmethod
    def run(self):
        raise NotImplementedError()
| 229
| 1
|
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/maskformer-swin-base-ade': (
        'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    """Configuration for MaskFormer: a backbone config, a DETR-style decoder config, and loss weights."""
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]
    def __init__( self, fpn_feature_size: int = 2_5_6, mask_feature_size: int = 2_5_6, no_object_weight: float = 0.1, use_auxiliary_loss: bool = False, backbone_config: Optional[Dict] = None, decoder_config: Optional[Dict] = None, init_std: float = 0.02, init_xavier_std: float = 1.0, dice_weight: float = 1.0, cross_entropy_weight: float = 1.0, mask_weight: float = 20.0, output_auxiliary_logits: Optional[bool] = None, **kwargs, ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=3_8_4, in_channels=3, patch_size=4, embed_dim=1_2_8, depths=[2, 2, 1_8, 2], num_heads=[4, 8, 1_6, 3_2], window_size=1_2, drop_path_rate=0.3, out_features=['stage1', 'stage2', 'stage3', 'stage4'], )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop('model_type')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
                f"""Supported model types: {",".join(self.backbones_supported)}""")
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop('model_type') if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"""Transformer Decoder {decoder_type} not supported, please use one of"""
                    f""" {",".join(self.decoders_supported)}""")
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)
    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config, decoder_config=decoder_config, **kwargs, )
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 350
|
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        'The preprocess method is deprecated and will be removed in a future version. Please'
        ' use VaeImageProcessor.preprocess instead', FutureWarning, )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['lanczos']))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]
    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert('L').resize((w, h), resample=PIL_INTERPOLATION['nearest']))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__( self, image: Union[torch.Tensor, PIL.Image.Image], mask_image: Union[torch.Tensor, PIL.Image.Image], num_inference_steps: int = 2_5_0, eta: float = 0.0, jump_length: int = 1_0, jump_n_sample: int = 1_0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)
        batch_size = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator)}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""")
        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta
        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
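# Usage sketch (names illustrative; mirrors the usual RePaint inpainting setup):
#     scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#     unet = UNet2DModel.from_pretrained("google/ddpm-ema-celebahq-256")
#     pipe = RePaintPipeline(unet=unet, scheduler=scheduler)
#     out = pipe(image=original_image, mask_image=mask,
#                generator=torch.Generator().manual_seed(0)).images[0]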
| 207
| 0
|
import os
import posixpath
import shutil  # used when moving finished shards out of the working directory
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """Builder config for the :class:`Spark` dataset builder."""
    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df: "pyspark.sql.DataFrame", partition_order: List[int], ):
    import pyspark
    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"""part_id = {partition_id}""").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"""{partition_id}_{row_id}""", row.asDict()
                row_id += 1
    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None, ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)
    def __iter__(self):
        yield from self.generate_examples_fn()
    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)
    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)
    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig
    def __init__(self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **config_kwargs, ):
        import pyspark
        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs, )
    def _validate_cache_dir(self):
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]
        if self._spark.conf.get("spark.master", "").startswith("local"):
            return
        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir")
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size: int) -> None:
        import pyspark
        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})
        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath: str, file_format: str, max_shard_size: int, ):
        import pyspark
        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options
        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]], names=["task_id", "num_examples", "num_bytes"], )
            shard_id = 0
            writer = writer_class(
                features=features, path=working_fpath.replace("SSSSS", f"""{shard_id:05d}""").replace("TTTTT", f"""{task_id:05d}"""), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files, )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"], )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features, path=working_fpath.replace("SSSSS", f"""{shard_id:05d}""").replace("TTTTT", f"""{task_id:05d}"""), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files, )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"], )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)
        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"), pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"), pyspark.sql.functions.count("num_bytes").alias("num_shards"), pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"), )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def __SCREAMING_SNAKE_CASE( self , _A , _A = "arrow" , _A = None , _A = None , **_A , ):
"""simple docstring"""
self._validate_cache_dir()
__lowerCAmelCase = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(_A )
__lowerCAmelCase = not is_remote_filesystem(self._fs )
__lowerCAmelCase = os.path.join if is_local else posixpath.join
__lowerCAmelCase = "-TTTTT-SSSSS-of-NNNNN"
__lowerCAmelCase = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
__lowerCAmelCase = path_join(self._output_dir , _A )
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = []
__lowerCAmelCase = []
for task_id, content in self._prepare_split_single(_A , _A , _A ):
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(_A )
__lowerCAmelCase = total_num_examples
__lowerCAmelCase = total_num_bytes
# should rename everything at the end
logger.debug(f"""Renaming {total_shards} shards.""" )
if total_shards > 1:
__lowerCAmelCase = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
__lowerCAmelCase = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
_A , _A , _A , ):
rename(
_A , fpath.replace("SSSSS" , f"""{shard_id:05d}""" ).replace("TTTTT" , f"""{task_id:05d}""" ) , fpath.replace("TTTTT-SSSSS" , f"""{global_shard_id:05d}""" ).replace("NNNNN" , f"""{total_shards:05d}""" ) , )
__lowerCAmelCase = []
__lowerCAmelCase = 0
for i in range(len(_A ) ):
__lowerCAmelCase , __lowerCAmelCase = task_id_and_num_shards[i]
for shard_id in range(_A ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(_A , len(_A ) ).map(lambda _A : _rename_shard(*_A ) ).collect()
else:
# don't use any pattern
__lowerCAmelCase = 0
__lowerCAmelCase = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("SSSSS" , f"""{shard_id:05d}""" ).replace("TTTTT" , f"""{task_id:05d}""" ) , fpath.replace(_A , "" ) , )
    def _get_examples_iterable_for_split(self, split_generator):
        """Used for streaming: wrap the Spark dataframe in an examples iterable."""
        return SparkExamplesIterable(self.df)
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
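
        # e.g. with the defaults image_size=30 and patch_size=2 above, there are
        # (30 // 2) ** 2 = 225 patches, so seq_length = 227.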
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , nn.Linear ) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(_A )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_A )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
__lowerCAmelCase = model_class(_A )
model.to(_A )
model.train()
__lowerCAmelCase = self._prepare_for_class(_A , _A , return_labels=_A )
__lowerCAmelCase = model(**_A ).loss
loss.backward()
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__lowerCAmelCase = False
__lowerCAmelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(_A ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
__lowerCAmelCase = model_class(_A )
model.gradient_checkpointing_enable()
model.to(_A )
model.train()
__lowerCAmelCase = self._prepare_for_class(_A , _A , return_labels=_A )
__lowerCAmelCase = model(**_A ).loss
loss.backward()
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_A ),
*get_values(_A ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"""Testing {model_class} with {problem_type['title']}""" ):
__lowerCAmelCase = problem_type["title"]
__lowerCAmelCase = problem_type["num_labels"]
__lowerCAmelCase = model_class(_A )
model.to(_A )
model.train()
__lowerCAmelCase = self._prepare_for_class(_A , _A , return_labels=_A )
if problem_type["num_labels"] > 1:
__lowerCAmelCase = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
__lowerCAmelCase = inputs["labels"].to(problem_type["dtype"] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_A ) as warning_list:
__lowerCAmelCase = model(**_A ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
"""simple docstring"""
def lowerCAmelCase__ ( _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Any:
"""simple docstring"""
snake_case = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def lowerCAmelCase__ ( _UpperCamelCase : Dict , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
snake_case = 0
while b > 0:
if b & 1:
snake_case = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
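

# Quick sanity checks for the two helpers above (a minimal sketch):
if __name__ == "__main__":
    assert multiply(7, 6) == 42  # 6 = 0b110: adds the doublings 14 and 28
    assert multiply_mod(7, 6, 5) == 42 % 5  # same recurrence, reduced mod 5
    print("binary multiplication checks passed")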
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
"""simple docstring"""
def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=99 , lowerCAmelCase=32 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase=37 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_12 , lowerCAmelCase=16 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase="None" , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ):
"""simple docstring"""
snake_case = parent
snake_case = batch_size
snake_case = seq_length
snake_case = is_training
snake_case = use_input_mask
snake_case = use_token_type_ids
snake_case = use_labels
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = intermediate_size
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = type_sequence_label_size
snake_case = initializer_range
snake_case = num_labels
snake_case = num_choices
snake_case = relative_attention
snake_case = position_biased_input
snake_case = pos_att_type
snake_case = scope
def snake_case ( self ):
"""simple docstring"""
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case = None
if self.use_input_mask:
snake_case = random_attention_mask([self.batch_size, self.seq_length] )
snake_case = None
if self.use_token_type_ids:
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case = None
snake_case = None
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=lowerCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = TFDebertaVaModel(config=lowerCAmelCase )
snake_case = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
snake_case = [input_ids, input_mask]
snake_case = model(lowerCAmelCase )
snake_case = model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = TFDebertaVaForMaskedLM(config=lowerCAmelCase )
snake_case = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
snake_case = model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = self.num_labels
snake_case = TFDebertaVaForSequenceClassification(config=lowerCAmelCase )
snake_case = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
snake_case = model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = self.num_labels
snake_case = TFDebertaVaForTokenClassification(config=lowerCAmelCase )
snake_case = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
snake_case = model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = TFDebertaVaForQuestionAnswering(config=lowerCAmelCase )
snake_case = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
snake_case = model(lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFDebertaVaModel,
"""fill-mask""": TFDebertaVaForMaskedLM,
"""question-answering""": TFDebertaVaForQuestionAnswering,
"""text-classification""": TFDebertaVaForSequenceClassification,
"""token-classification""": TFDebertaVaForTokenClassification,
"""zero-shot""": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
def snake_case ( self ):
"""simple docstring"""
snake_case = TFDebertaVaModelTester(self )
snake_case = ConfigTester(self , config_class=lowerCAmelCase , hidden_size=37 )
def snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase )
@slow
def snake_case ( self ):
"""simple docstring"""
snake_case = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
self.assertIsNotNone(lowerCAmelCase )
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason='Model not available yet' )
def snake_case ( self ):
"""simple docstring"""
pass
@slow
def snake_case ( self ):
"""simple docstring"""
snake_case = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
snake_case = tf.constant([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
snake_case = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase )[0]
snake_case = tf.constant(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , lowerCAmelCase , atol=1E-4 )
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_A : int = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def __magic_name__ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowercase : Any = DebertaVaTokenizer(_a , unk_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self , _a ):
lowercase : int = "this is a test"
lowercase : Tuple = "this is a test"
return input_text, output_text
def __magic_name__ ( self ):
lowercase : List[Any] = "<pad>"
lowercase : List[str] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def __magic_name__ ( self ):
lowercase : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "[PAD]" )
self.assertEqual(len(_a ) , 30_001 )
def __magic_name__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def __magic_name__ ( self ):
# fmt: off
lowercase : List[str] = " \tHeLLo!how \n Are yoU? "
lowercase : str = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
lowercase : Union[str, Any] = DebertaVaTokenizer(_a , do_lower_case=_a )
lowercase : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_a , add_special_tokens=_a ) )
self.assertListEqual(_a , _a )
lowercase : str = DebertaVaTokenizerFast(_a , do_lower_case=_a )
lowercase : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_a , add_special_tokens=_a ) )
self.assertListEqual(_a , _a )
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def __magic_name__ ( self ):
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def __magic_name__ ( self ):
pass
def __magic_name__ ( self ):
# fmt: off
lowercase : Optional[Any] = "I was born in 92000, and this is falsé."
lowercase : Tuple = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
lowercase : List[Any] = DebertaVaTokenizer(_a , split_by_punct=_a )
lowercase : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(_a , add_special_tokens=_a ) )
self.assertListEqual(_a , _a )
lowercase : Union[str, Any] = DebertaVaTokenizerFast(_a , split_by_punct=_a )
lowercase : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_a , add_special_tokens=_a ) )
self.assertListEqual(_a , _a )
def __magic_name__ ( self ):
# fmt: off
lowercase : int = "I was born in 92000, and this is falsé."
lowercase : Tuple = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
lowercase : List[str] = DebertaVaTokenizer(_a , do_lower_case=_a , split_by_punct=_a )
lowercase : Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_a , add_special_tokens=_a ) )
self.assertListEqual(_a , _a )
lowercase : Union[str, Any] = DebertaVaTokenizerFast(_a , do_lower_case=_a , split_by_punct=_a )
lowercase : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_a , add_special_tokens=_a ) )
self.assertListEqual(_a , _a )
def __magic_name__ ( self ):
# fmt: off
lowercase : List[Any] = "I was born in 92000, and this is falsé."
lowercase : Optional[int] = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
lowercase : List[Any] = DebertaVaTokenizer(_a , do_lower_case=_a , split_by_punct=_a )
lowercase : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_a , add_special_tokens=_a ) )
self.assertListEqual(_a , _a )
lowercase : Optional[int] = DebertaVaTokenizerFast(_a , do_lower_case=_a , split_by_punct=_a )
lowercase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_a , add_special_tokens=_a ) )
self.assertListEqual(_a , _a )
def __magic_name__ ( self ):
# fmt: off
lowercase : int = "I was born in 92000, and this is falsé."
lowercase : Dict = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
lowercase : Union[str, Any] = DebertaVaTokenizer(_a , do_lower_case=_a , split_by_punct=_a )
lowercase : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_a , add_special_tokens=_a ) )
self.assertListEqual(_a , _a )
lowercase : Union[str, Any] = DebertaVaTokenizerFast(_a , do_lower_case=_a , split_by_punct=_a )
lowercase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_a , add_special_tokens=_a ) )
self.assertListEqual(_a , _a )
def __magic_name__ ( self ):
# fmt: off
lowercase : Dict = " \tHeLLo!how \n Are yoU? "
lowercase : str = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
lowercase : Optional[int] = DebertaVaTokenizer(_a , do_lower_case=_a , split_by_punct=_a )
lowercase : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(_a , add_special_tokens=_a ) )
self.assertListEqual(_a , _a )
lowercase : List[str] = DebertaVaTokenizerFast(_a , do_lower_case=_a , split_by_punct=_a )
lowercase : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_a , add_special_tokens=_a ) )
self.assertListEqual(_a , _a )
def __magic_name__ ( self ):
lowercase : str = self.get_tokenizer()
lowercase : Dict = self.get_rust_tokenizer()
lowercase : str = "I was born in 92000, and this is falsé."
lowercase : Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_a , add_special_tokens=_a ) )
lowercase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_a , add_special_tokens=_a ) )
self.assertListEqual(_a , _a )
lowercase : str = tokenizer.encode(_a , add_special_tokens=_a )
lowercase : Dict = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
lowercase : Optional[int] = self.get_rust_tokenizer()
lowercase : Tuple = tokenizer.encode(_a )
lowercase : List[str] = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
def __magic_name__ ( self ):
lowercase : str = "This is a test"
lowercase : Tuple = [13, 1, 4_398, 25, 21, 1_289]
lowercase : Optional[int] = ["▁", "T", "his", "▁is", "▁a", "▁test"]
lowercase : Optional[Any] = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
lowercase : Any = DebertaVaTokenizer(_a , keep_accents=_a )
lowercase : Dict = DebertaVaTokenizerFast(_a , keep_accents=_a )
lowercase : str = tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
lowercase : str = tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
lowercase : str = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(_a , _a )
lowercase : int = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
lowercase : Any = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
lowercase : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(_a , _a )
# fmt: off
lowercase : int = "I was born in 92000, and this is falsé."
lowercase : Any = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9]
lowercase : List[str] = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
lowercase : str = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
lowercase : Tuple = tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
lowercase : List[Any] = tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
lowercase : str = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(_a , _a )
lowercase : Optional[int] = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
lowercase : List[Any] = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
lowercase : List[Any] = rust_tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(_a , _a )
def __magic_name__ ( self ):
lowercase : Optional[int] = DebertaVaTokenizer(_a )
lowercase : List[Any] = tokenizer.encode("sequence builders" )
lowercase : Dict = tokenizer.encode("multi-sequence build" )
lowercase : List[Any] = tokenizer.build_inputs_with_special_tokens(_a )
lowercase : Dict = tokenizer.build_inputs_with_special_tokens(_a , _a )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _a )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _a , )
@slow
def __magic_name__ ( self ):
# fmt: off
lowercase : Dict = {"input_ids": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case : str = logging.get_logger(__name__)
_snake_case : List[str] = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class A ( _a ):
lowercase_ = "roberta"
def __init__( self : Union[str, Any] , lowerCAmelCase_ : Union[str, Any]=5_02_65 , lowerCAmelCase_ : Dict=7_68 , lowerCAmelCase_ : Any=12 , lowerCAmelCase_ : Tuple=12 , lowerCAmelCase_ : Tuple=30_72 , lowerCAmelCase_ : Union[str, Any]="gelu" , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : Any=5_12 , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : List[str]=0.0_2 , lowerCAmelCase_ : List[Any]=1e-12 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : List[str]=0 , lowerCAmelCase_ : List[Any]=2 , lowerCAmelCase_ : List[Any]="absolute" , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Dict=None , **lowerCAmelCase_ : Tuple , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = hidden_act
_a = intermediate_size
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = initializer_range
_a = layer_norm_eps
_a = position_embedding_type
_a = use_cache
_a = classifier_dropout
class A ( _a ):
@property
def __lowerCAmelCase ( self : str ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
_a = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_a = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
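
        # Example: for task "multiple-choice" the property above returns
        # {"input_ids": {0: "batch", 1: "choice", 2: "sequence"},
        #  "attention_mask": {0: "batch", 1: "choice", 2: "sequence"}}.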
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class A ( _a ):
lowercase_ = 42
lowercase_ = 42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(fnc: Callable[[float], float], x_start: float, x_end: float, steps: int = 100) -> float:
    """Approximate the arc length of fnc over [x_start, x_end] using `steps` line segments."""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length
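

# The loop above approximates the arc length integral
# L = integral over [x_start, x_end] of sqrt(1 + f'(x)^2) dx by summing the
# straight-line segment lengths hypot(x2 - x1, fx2 - fx1); the estimate
# converges as `steps` grows (see the demo loop below).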
if __name__ == "__main__":
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
return math.sin(1_0 * x )
print('f(x) = sin(10 * x)')
print('The length of the curve from x = -10 to x = 10 is:')
SCREAMING_SNAKE_CASE :Tuple = 10
while i <= 10_0000:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
def find_minimum_change(denominations, value):
    """Greedily pick the largest denominations first to make change for `value`."""
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append to the "answer" array
    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = '0'

    if (
        input('Do you want to enter your denominations ? (yY/n): ').strip().lower()
        == "y"
    ):
        n = int(input('Enter the number of denominations you want to add: ').strip())
        for i in range(0, n):
            denominations.append(int(input(f"""Denomination {i}: """).strip()))
        value = input('Enter the change you want to make in Indian Currency: ').strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input('Enter the change you want to make: ').strip()

    if int(value) == 0 or int(value) < 0:
        print('The total value cannot be zero or negative.')
    else:
        print(f"""Following is minimal change for {value}: """)
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=' ')
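

# Worked example: find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
# returns [500, 100, 100, 100, 100, 50, 20, 10, 5, 2], which sums to 987; the
# greedy choice is optimal for canonical coin systems like this one.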
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
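
    # Worked example (hypothetical sizes): a 30x40 (w x h) input with
    # size={"shortest_edge": 18} yields expected_width = 18 and
    # expected_height = int(18 * 40 / 30) = 24, preserving the aspect ratio.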
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , "image_mean" ) )
self.assertTrue(hasattr(_a , "image_std" ) )
self.assertTrue(hasattr(_a , "do_normalize" ) )
self.assertTrue(hasattr(_a , "do_resize" ) )
self.assertTrue(hasattr(_a , "do_rescale" ) )
self.assertTrue(hasattr(_a , "do_pad" ) )
self.assertTrue(hasattr(_a , "size" ) )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} )
self.assertEqual(image_processor.do_pad , _a )
def SCREAMING_SNAKE_CASE ( self ):
pass
def SCREAMING_SNAKE_CASE ( self ):
# Initialize image_processing
__magic_name__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
__magic_name__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__magic_name__ , __magic_name__ : Any = self.image_processor_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ , __magic_name__ : Any = self.image_processor_tester.get_expected_values(_a , batched=_a )
__magic_name__ : int = image_processing(_a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self ):
# Initialize image_processing
__magic_name__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
__magic_name__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__magic_name__ , __magic_name__ : str = self.image_processor_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ : Any = image_processing(_a , return_tensors="pt" ).pixel_values
__magic_name__ , __magic_name__ : Union[str, Any] = self.image_processor_tester.get_expected_values(_a , batched=_a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self ):
# Initialize image_processing
__magic_name__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
__magic_name__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__magic_name__ , __magic_name__ : List[Any] = self.image_processor_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ : List[str] = image_processing(_a , return_tensors="pt" ).pixel_values
__magic_name__ , __magic_name__ : int = self.image_processor_tester.get_expected_values(_a , batched=_a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE ( self ):
# prepare image and target
__magic_name__ : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
__magic_name__ : Tuple = json.loads(f.read() )
__magic_name__ : Any = {"image_id": 39_769, "annotations": target}
# encode them
__magic_name__ : Union[str, Any] = DetaImageProcessor()
__magic_name__ : str = image_processing(images=_a , annotations=_a , return_tensors="pt" )
# verify pixel values
__magic_name__ : List[Any] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , _a )
__magic_name__ : Tuple = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , _a , atol=1e-4 ) )
# verify area
__magic_name__ : int = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , _a ) )
# verify boxes
__magic_name__ : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , _a )
__magic_name__ : List[Any] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , _a , atol=1e-3 ) )
# verify image_id
__magic_name__ : Any = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , _a ) )
# verify is_crowd
__magic_name__ : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , _a ) )
# verify class_labels
__magic_name__ : List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , _a ) )
# verify orig_size
__magic_name__ : List[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , _a ) )
# verify size
__magic_name__ : Any = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , _a ) )
@slow
def SCREAMING_SNAKE_CASE ( self ):
# prepare image, target and masks_path
__magic_name__ : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
__magic_name__ : List[Any] = json.loads(f.read() )
__magic_name__ : Any = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
__magic_name__ : Dict = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
__magic_name__ : List[Any] = DetaImageProcessor(format="coco_panoptic" )
__magic_name__ : Optional[int] = image_processing(images=_a , annotations=_a , masks_path=_a , return_tensors="pt" )
# verify pixel values
__magic_name__ : Any = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , _a )
__magic_name__ : List[Any] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , _a , atol=1e-4 ) )
# verify area
__magic_name__ : Union[str, Any] = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , _a ) )
# verify boxes
__magic_name__ : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , _a )
__magic_name__ : int = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , _a , atol=1e-3 ) )
# verify image_id
__magic_name__ : Optional[int] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , _a ) )
# verify is_crowd
__magic_name__ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , _a ) )
# verify class_labels
__magic_name__ : Optional[Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , _a ) )
# verify masks
__magic_name__ : List[Any] = 822_873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , _a )
# verify orig_size
__magic_name__ : Optional[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , _a ) )
# verify size
__magic_name__ : Optional[int] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , _a ) )
def is_automorphic_number(number: int) -> bool:
    """Return True if `number` is automorphic, i.e. its square ends in its own digits."""
    if not isinstance(number, int):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
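

# Quick sanity checks: 5**2 = 25 and 76**2 = 5776 end in 5 and 76 respectively,
# so both are automorphic; 7**2 = 49 does not end in 7.
assert is_automorphic_number(5) and is_automorphic_number(76)
assert not is_automorphic_number(7)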
if __name__ == "__main__":
import doctest
doctest.testmod()
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
_a = logging.get_logger(__name__)
enable_full_determinism()
class __lowerCamelCase ( snake_case__ , snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = UNetaDModel
UpperCamelCase__ = "sample"
@property
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = (32, 32)
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
_UpperCAmelCase = torch.tensor([10] ).to(UpperCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def UpperCamelCase ( self ):
"""simple docstring"""
return (3, 32, 32)
@property
def UpperCamelCase ( self ):
"""simple docstring"""
return (3, 32, 32)
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = {
'block_out_channels': (32, 64),
'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
'attention_head_dim': 3,
'out_channels': 3,
'in_channels': 3,
'layers_per_block': 2,
'sample_size': 32,
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
class __lowerCamelCase ( snake_case__ , snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = UNetaDModel
UpperCamelCase__ = "sample"
@property
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = 4
_UpperCAmelCase = 4
_UpperCAmelCase = (32, 32)
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
_UpperCAmelCase = torch.tensor([10] ).to(UpperCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def UpperCamelCase ( self ):
"""simple docstring"""
return (4, 32, 32)
@property
def UpperCamelCase ( self ):
"""simple docstring"""
return (4, 32, 32)
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = {
'sample_size': 32,
'in_channels': 4,
'out_channels': 4,
'layers_per_block': 2,
'block_out_channels': (32, 64),
'attention_head_dim': 32,
'down_block_types': ('DownBlock2D', 'DownBlock2D'),
'up_block_types': ('UpBlock2D', 'UpBlock2D'),
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def test_from_pretrained_hub( self ):
"""simple docstring"""
model , loading_info = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=True )
self.assertIsNotNone(model )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(torch_device )
image = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def test_from_pretrained_accelerate( self ):
"""simple docstring"""
model , _ = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=True )
model.to(torch_device )
image = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def test_from_pretrained_accelerate_wont_change_results( self ):
"""simple docstring"""
model_accelerate , _ = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=True )
model_accelerate.to(torch_device )
model_accelerate.eval()
noise = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
noise = noise.to(torch_device )
time_step = torch.tensor([10] * noise.shape[0] ).to(torch_device )
arr_accelerate = model_accelerate(noise , time_step )['sample']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
model_normal_load , _ = UNetaDModel.from_pretrained(
'fusing/unet-ldm-dummy-update' , output_loading_info=True , low_cpu_mem_usage=False )
model_normal_load.to(torch_device )
model_normal_load.eval()
arr_normal_load = model_normal_load(noise , time_step )['sample']
assert torch_all_close(arr_accelerate , arr_normal_load , rtol=1e-3 )
def test_output_pretrained( self ):
"""simple docstring"""
model = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' )
model.eval()
model.to(torch_device )
noise = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
noise = noise.to(torch_device )
time_step = torch.tensor([10] * noise.shape[0] ).to(torch_device )
with torch.no_grad():
output = model(noise , time_step ).sample
output_slice = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
# fmt: on
self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1e-3 ) )
class NCSNppModelTests ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = UNetaDModel
UpperCamelCase__ = "sample"
@property
def dummy_input ( self , sizes=(32, 32) ):
"""simple docstring"""
batch_size = 4
num_channels = 3
noise = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
time_step = torch.tensor(batch_size * [10] ).to(dtype=torch.int32 , device=torch_device )
return {"sample": noise, "timestep": time_step}
@property
def UpperCamelCase ( self ):
"""simple docstring"""
return (3, 32, 32)
@property
def UpperCamelCase ( self ):
"""simple docstring"""
return (3, 32, 32)
def UpperCamelCase ( self ):
"""simple docstring"""
init_dict = {
'block_out_channels': [32, 64, 64, 64],
'in_channels': 3,
'layers_per_block': 1,
'out_channels': 3,
'time_embedding_type': 'fourier',
'norm_eps': 1e-6,
'mid_block_scale_factor': math.sqrt(2.0 ),
'norm_num_groups': None,
'down_block_types': [
'SkipDownBlock2D',
'AttnSkipDownBlock2D',
'SkipDownBlock2D',
'SkipDownBlock2D',
],
'up_block_types': [
'SkipUpBlock2D',
'SkipUpBlock2D',
'AttnSkipUpBlock2D',
'SkipUpBlock2D',
],
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
@slow
def test_from_pretrained_hub( self ):
"""simple docstring"""
model , loading_info = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' , output_loading_info=True )
self.assertIsNotNone(model )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(torch_device )
inputs = self.dummy_input
noise = floats_tensor((4, 3) + (256, 256) ).to(torch_device )
inputs['sample'] = noise
image = model(**inputs )
assert image is not None, "Make sure output is not None"
@slow
def test_output_pretrained_ve_mid( self ):
"""simple docstring"""
model = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' )
model.to(torch_device )
batch_size = 4
num_channels = 3
sizes = (256, 256)
noise = torch.ones((batch_size, num_channels) + sizes ).to(torch_device )
time_step = torch.tensor(batch_size * [1e-4] ).to(torch_device )
with torch.no_grad():
output = model(noise , time_step ).sample
output_slice = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608] )
# fmt: on
self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1e-2 ) )
def test_output_pretrained_ve_large( self ):
"""simple docstring"""
model = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' )
model.to(torch_device )
batch_size = 4
num_channels = 3
sizes = (32, 32)
noise = torch.ones((batch_size, num_channels) + sizes ).to(torch_device )
time_step = torch.tensor(batch_size * [1e-4] ).to(torch_device )
with torch.no_grad():
output = model(noise , time_step ).sample
output_slice = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
# fmt: on
self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1e-2 ) )
def test_forward_with_norm_groups( self ):
"""simple docstring"""
pass
| 39
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( ProcessorMixin ):
"""simple docstring"""
attributes = ["""image_processor""", """tokenizer"""]
image_processor_class = """BlipImageProcessor"""
tokenizer_class = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self , image_processor , tokenizer ):
'''simple docstring'''
tokenizer.return_token_type_ids = False
super().__init__(image_processor, tokenizer )
self.current_processor = self.image_processor
def __call__( self, images : ImageInput = None, text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens : bool = True, padding : Union[bool, str, PaddingStrategy] = False, truncation : Union[bool, str, TruncationStrategy] = None, max_length : Optional[int] = None, stride : int = 0, pad_to_multiple_of : Optional[int] = None, return_attention_mask : Optional[bool] = None, return_overflowing_tokens : bool = False, return_special_tokens_mask : bool = False, return_offsets_mapping : bool = False, return_token_type_ids : bool = False, return_length : bool = False, verbose : bool = True, return_tensors : Optional[Union[str, TensorType]] = None, **kwargs, ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None:
self.current_processor = self.tokenizer
text_encoding = self.tokenizer(
text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
return text_encoding
# add pixel_values
encoding_image_processor = self.image_processor(images, return_tensors=return_tensors )
if text is not None:
text_encoding = self.tokenizer(
text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
else:
text_encoding = None
if text_encoding is not None:
encoding_image_processor.update(text_encoding )
return encoding_image_processor
def batch_decode( self , *args , **kwargs ):
'''simple docstring'''
return self.tokenizer.batch_decode(*args , **kwargs )
def decode( self , *args , **kwargs ):
'''simple docstring'''
return self.tokenizer.decode(*args , **kwargs )
@property
def model_input_names( self ):
'''simple docstring'''
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
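# Hedged usage sketch (the checkpoint name below is an assumption, not taken from this file):
#   processor = _UpperCAmelCase.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")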
| 207
| 0
|
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class lowerCamelCase_ ( BertTokenizationTest ):
'''simple docstring'''
tokenizer_class = DistilBertTokenizer
rust_tokenizer_class = DistilBertTokenizerFast
test_rust_tokenizer = True
@slow
def test_sequence_builders( self ):
tokenizer = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''')
text = tokenizer.encode('''sequence builders''' , add_special_tokens=False)
text_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a)
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
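# For reference (hedged, standard BERT-style layout): the asserts above amount to
# encoded_sentence == [CLS] text [SEP] and encoded_pair == [CLS] text [SEP] text_a [SEP].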
| 105
|
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class NERTransformer ( BaseTransformer ):
'''simple docstring'''
mode = """token-classification"""
def __init__( self , hparams):
if type(hparams) == dict:
hparams = Namespace(**hparams)
module = import_module('''tasks''')
try:
token_classification_task_clazz = getattr(module , hparams.task_type)
self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""")
self.labels = self.token_classification_task.get_labels(hparams.labels)
self.pad_token_label_id = CrossEntropyLoss().ignore_index
super().__init__(hparams , len(self.labels) , self.mode)
def forward( self , **inputs):
return self.model(**inputs)
def training_step( self , batch , batch_num):
inputs = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
inputs['''token_type_ids'''] = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don't use token_type_ids
outputs = self(**inputs)
loss = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def prepare_data( self):
args = self.hparams
for mode in ["train", "dev", "test"]:
cached_features_file = self._feature_file(mode)
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir)
examples = self.token_classification_task.read_examples_from_file(args.data_dir , mode)
features = self.token_classification_task.convert_examples_to_features(
examples , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet''']) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(self.config.model_type in ['''xlnet''']) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('''Saving features into cached file %s''' , cached_features_file)
torch.save(features , cached_features_file)
def get_dataloader( self , mode , batch_size , shuffle = False) -> DataLoader:
cached_features_file = self._feature_file(mode)
logger.info('''Loading features from cached file %s''' , cached_features_file)
features = torch.load(cached_features_file)
all_input_ids = torch.tensor([f.input_ids for f in features] , dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features] , dtype=torch.long)
if features[0].token_type_ids is not None:
all_token_type_ids = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long)
else:
all_token_type_ids = torch.tensor([0 for f in features] , dtype=torch.long)
# HACK(we will not use this anymore soon)
all_label_ids = torch.tensor([f.label_ids for f in features] , dtype=torch.long)
return DataLoader(
TensorDataset(all_input_ids , all_attention_mask , all_token_type_ids , all_label_ids) , batch_size=batch_size)
def validation_step( self , batch , batch_nb):
"""Compute validation"""
inputs = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
inputs['''token_type_ids'''] = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don't use token_type_ids
outputs = self(**inputs)
tmp_eval_loss , logits = outputs[:2]
preds = logits.detach().cpu().numpy()
out_label_ids = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _eval_end( self , outputs):
val_loss_mean = torch.stack([x['''val_loss'''] for x in outputs]).mean()
preds = np.concatenate([x['''pred'''] for x in outputs] , axis=0)
preds = np.argmax(preds , axis=2)
out_label_ids = np.concatenate([x['''target'''] for x in outputs] , axis=0)
label_map = dict(enumerate(self.labels))
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
results = {
'''val_loss''': val_loss_mean,
'''accuracy_score''': accuracy_score(out_label_list , preds_list),
'''precision''': precision_score(out_label_list , preds_list),
'''recall''': recall_score(out_label_list , preds_list),
'''f1''': f1_score(out_label_list , preds_list),
}
ret = dict(results.items())
ret['''log'''] = results
return ret, preds_list, out_label_list
def validation_epoch_end( self , outputs):
# when stable
ret , preds , targets = self._eval_end(outputs)
logs = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def test_epoch_end( self , outputs):
# updating to test_epoch_end instead of deprecated test_end
ret , predictions , targets = self._eval_end(outputs)
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
logs = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def add_model_specific_args( parser , root_dir):
# Add NER specific options
BaseTransformer.add_model_specific_args(parser , root_dir)
parser.add_argument(
'''--task_type''' , default='''NER''' , type=str , help='''Task type to fine tune in training (e.g. NER, POS, etc)''')
parser.add_argument(
'''--max_seq_length''' , default=128 , type=int , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--labels''' , default='''''' , type=str , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=int , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''')
return parser
if __name__ == "__main__":
parser = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
args = parser.parse_args()
model = NERTransformer(args)
trainer = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
checkpoints = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
model = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
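# Hedged invocation sketch (the script name and paths are placeholders; --task_type,
# --labels and --max_seq_length are defined above, while the generic flags come from
# add_generic_args/BaseTransformer and are assumptions):
#   python run_ner.py --data_dir ./data --model_name_or_path bert-base-cased \
#       --output_dir ./out --task_type NER --max_seq_length 128 --do_predict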
| 105
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
def is_prime( number : int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def list_truncated_nums( n : int ) -> list[int]:
"""simple docstring"""
str_num = str(n )
list_nums = [n]
for i in range(1 , len(str_num ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def validate( n : int ) -> bool:
"""simple docstring"""
if len(str(n ) ) > 3:
if not is_prime(int(str(n )[-3:] ) ) or not is_prime(int(str(n )[:3] ) ):
return False
return True
def compute_truncated_primes( count : int = 11 ) -> list[int]:
"""simple docstring"""
list_truncated_primes = []
num = 13
while len(list_truncated_primes ) != count:
if validate(num ):
list_nums = list_truncated_nums(num )
if all(is_prime(i ) for i in list_nums ):
list_truncated_primes.append(num )
num += 2
return list_truncated_primes
def solution() -> int:
"""simple docstring"""
return sum(compute_truncated_primes(11 ) )
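# Worked example (from the classic statement of this problem): 3797 is truncatable,
# since 3797, 797, 97, 7 and 379, 37, 3 are all prime, so it appears in the result list.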
if __name__ == "__main__":
print(f'''{sum(compute_truncated_primes(11)) = }''')
| 47
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__: Any = logging.get_logger(__name__)
A__: List[str] = {
'''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class _a ( PretrainedConfig ):
"""simple docstring"""
model_type = """pegasus"""
keys_to_ignore_at_inference = ["""past_key_values"""]
attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , vocab_size=50265 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=1024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=0 , scale_embedding=False , pad_token_id=0 , eos_token_id=1 , forced_eos_token_id=1 , **kwargs , ):
'''simple docstring'''
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
@property
def num_attention_heads( self ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def hidden_size( self ):
'''simple docstring'''
return self.d_model
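# Hedged usage sketch (standard PretrainedConfig API; the tiny sizes are arbitrary):
#   config = _a(encoder_layers=2, decoder_layers=2, d_model=64)
#   config.save_pretrained("./pegasus-tiny")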
| 149
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
"""configuration_speecht5""": [
"""SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP""",
"""SpeechT5Config""",
"""SpeechT5HifiGanConfig""",
],
"""feature_extraction_speecht5""": ["""SpeechT5FeatureExtractor"""],
"""processing_speecht5""": ["""SpeechT5Processor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""SpeechT5Tokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_speecht5"""] = [
"""SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SpeechT5ForSpeechToText""",
"""SpeechT5ForSpeechToSpeech""",
"""SpeechT5ForTextToSpeech""",
"""SpeechT5Model""",
"""SpeechT5PreTrainedModel""",
"""SpeechT5HifiGan""",
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
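# Note (hedged): _LazyModule defers the heavy torch/sentencepiece imports declared in
# _import_structure until an attribute such as SpeechTaProcessor is first accessed.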
| 297
|
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class __lowerCamelCase ( SchedulerMixin , ConfigMixin ):
"""simple docstring"""
order = 1
@register_to_config
def __init__( self , num_train_timesteps = 1000 , trained_betas = None ):
'''simple docstring'''
self.set_timesteps(num_train_timesteps )
# standard deviation of the initial noise distribution
self.init_noise_sigma = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
self.pndm_order = 4
# running values
self.ets = []
def set_timesteps( self , num_inference_steps , device = None ):
'''simple docstring'''
self.num_inference_steps = num_inference_steps
steps = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
steps = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
self.betas = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
else:
self.betas = torch.sin(steps * math.pi / 2 ) ** 2
self.alphas = (1.0 - self.betas**2) ** 0.5
timesteps = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
self.timesteps = timesteps.to(device )
self.ets = []
def step( self , model_output , timestep , sample , return_dict = True , ) -> Union[SchedulerOutput, Tuple]:
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
timestep_index = (self.timesteps == timestep).nonzero().item()
prev_timestep_index = timestep_index + 1
ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(ets )
if len(self.ets ) == 1:
ets = self.ets[-1]
elif len(self.ets ) == 2:
ets = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
prev_sample = self._get_prev_sample(sample , timestep_index , prev_timestep_index , ets )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=prev_sample )
def scale_model_input( self , sample , *args , **kwargs ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def _get_prev_sample( self , sample , timestep_index , prev_timestep_index , ets ):
'''simple docstring'''
alpha = self.alphas[timestep_index]
sigma = self.betas[timestep_index]
next_alpha = self.alphas[prev_timestep_index]
next_sigma = self.betas[prev_timestep_index]
pred = (sample - sigma * ets) / max(alpha , 1e-8 )
prev_sample = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self ) -> List[str]:
'''simple docstring'''
return self.config.num_train_timesteps
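# Hedged usage sketch (the denoising `model` is a placeholder; the loop follows the
# standard diffusers scheduler protocol implemented above):
#   scheduler = __lowerCamelCase(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample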
| 297
| 1
|
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase :
name : str
pip_package : str = None
@staticmethod
def is_available( ):
raise NotImplementedError
def run( self , trainer , n_trials , direction , **kwargs ):
raise NotImplementedError
def default_hp_space( self , trial ):
raise NotImplementedError
def ensure_available( self ):
if not self.is_available():
raise RuntimeError(
F'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )
@classmethod
def pip_install( cls ):
return F'''`pip install {cls.pip_package or cls.name}`'''
class OptunaBackend ( HyperParamSearchBackendBase ):
name = "optuna"
@staticmethod
def is_available( ):
return is_optuna_available()
def run( self , trainer , n_trials , direction , **kwargs ):
return run_hp_search_optuna(trainer , n_trials , direction , **kwargs )
def default_hp_space( self , trial ):
return default_hp_space_optuna(trial )
class RayTuneBackend ( HyperParamSearchBackendBase ):
name = "ray"
pip_package = "'ray[tune]'"
@staticmethod
def is_available( ):
return is_ray_available()
def run( self , trainer , n_trials , direction , **kwargs ):
return run_hp_search_ray(trainer , n_trials , direction , **kwargs )
def default_hp_space( self , trial ):
return default_hp_space_ray(trial )
class SigOptBackend ( HyperParamSearchBackendBase ):
name = "sigopt"
@staticmethod
def is_available( ):
return is_sigopt_available()
def run( self , trainer , n_trials , direction , **kwargs ):
return run_hp_search_sigopt(trainer , n_trials , direction , **kwargs )
def default_hp_space( self , trial ):
return default_hp_space_sigopt(trial )
class WandbBackend ( HyperParamSearchBackendBase ):
name = "wandb"
@staticmethod
def is_available( ):
return is_wandb_available()
def run( self , trainer , n_trials , direction , **kwargs ):
return run_hp_search_wandb(trainer , n_trials , direction , **kwargs )
def default_hp_space( self , trial ):
return default_hp_space_wandb(trial )
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def lowercase_ ( ):
available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(available_backends) > 0:
name = available_backends[0].name
if len(available_backends) > 1:
logger.info(
f'''{len(available_backends)} hyperparameter search backends available. Using {name} as the default.''')
return name
raise RuntimeError(
"No hyperparameter search backend available.\n"
+ "\n".join(
f''' - To install {backend.name} run {backend.pip_install()}'''
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()))
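# Hedged usage note: Trainer.hyperparameter_search() resolves its backend through the
# helper above, e.g. `name = lowercase_()` returns the first installed backend's name.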
| 87
|
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState :
"""simple docstring"""
common : CommonSchedulerState
# setable values
init_noise_sigma : jnp.ndarray
timesteps : jnp.ndarray
num_inference_steps : Optional[int] = None
@classmethod
def create( cls , common , init_noise_sigma , timesteps ):
'''simple docstring'''
return cls(common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps )
@dataclass
class FlaxDDPMSchedulerOutput ( FlaxSchedulerOutput ):
"""simple docstring"""
state : DDPMSchedulerState
class FlaxDDPMScheduler ( FlaxSchedulerMixin , ConfigMixin ):
"""simple docstring"""
_compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
dtype : jnp.dtype
@property
def has_state( self ):
'''simple docstring'''
return True
@register_to_config
def __init__( self , num_train_timesteps = 1000 , beta_start = 0.0001 , beta_end = 0.02 , beta_schedule = "linear" , trained_betas = None , variance_type = "fixed_small" , clip_sample = True , prediction_type = "epsilon" , dtype = jnp.float32 , ):
'''simple docstring'''
self.dtype = dtype
def create_state( self , common = None ):
'''simple docstring'''
if common is None:
common = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
init_noise_sigma = jnp.array(1.0 , dtype=self.dtype )
timesteps = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps , )
def scale_model_input( self , state , sample , timestep = None ):
'''simple docstring'''
return sample
def set_timesteps( self , state , num_inference_steps , shape = () ):
'''simple docstring'''
step_ratio = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
timesteps = (jnp.arange(0 , num_inference_steps ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=num_inference_steps , timesteps=timesteps , )
def _get_variance( self , state , t , predicted_variance=None , variance_type=None ):
'''simple docstring'''
alpha_prod_t = state.common.alphas_cumprod[t]
alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
variance_type = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
variance = jnp.clip(variance , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
variance = jnp.log(jnp.clip(variance , a_min=1e-20 ) )
elif variance_type == "fixed_large":
variance = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
variance = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
min_log = variance
max_log = state.common.betas[t]
frac = (predicted_variance + 1) / 2
variance = frac * max_log + (1 - frac) * min_log
return variance
def step( self , state , model_output , timestep , sample , key = None , return_dict = True , ):
'''simple docstring'''
t = timestep
if key is None:
key = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
model_output , predicted_variance = jnp.split(model_output , sample.shape[1] , axis=1 )
else:
predicted_variance = None
# 1. compute alphas, betas
alpha_prod_t = state.common.alphas_cumprod[t]
alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
beta_prod_t = 1 - alpha_prod_t
beta_prod_t_prev = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
pred_original_sample = model_output
elif self.config.prediction_type == "v_prediction":
pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
'''or `v_prediction` for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
pred_original_sample = jnp.clip(pred_original_sample , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
split_key = jax.random.split(key , num=1 )
noise = jax.random.normal(split_key , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(state , t , predicted_variance=predicted_variance ) ** 0.5) * noise
variance = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
pred_prev_sample = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample , state=state )
def add_noise( self , state , original_samples , noise , timesteps , ):
'''simple docstring'''
return add_noise_common(state.common , original_samples , noise , timesteps )
def get_velocity( self , state , sample , noise , timesteps , ):
'''simple docstring'''
return get_velocity_common(state.common , sample , noise , timesteps )
def __len__( self ):
'''simple docstring'''
return self.config.num_train_timesteps
| 179
| 0
|
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a : Optional[int] = logging.get_logger(__name__)
a : Any = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
a : Any = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
a : str = {
"""vinai/phobert-base""": 2_5_6,
"""vinai/phobert-large""": 2_5_6,
}
def get_pairs ( word ):
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
prev_char = char
pairs = set(pairs )
return pairs
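# Worked example: get_pairs(("l", "o", "w")) returns {("l", "o"), ("o", "w")}, the set of
# adjacent symbol pairs that the BPE merge loop below ranks against self.bpe_ranks.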
class UpperCamelCase_ ( PreTrainedTokenizer ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , A , A , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , **A , ) -> Dict:
super().__init__(
bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , **A , )
UpperCAmelCase : Dict = vocab_file
UpperCAmelCase : List[str] = merges_file
UpperCAmelCase : Tuple = {}
UpperCAmelCase : List[str] = 0
UpperCAmelCase : str = 1
UpperCAmelCase : Optional[Any] = 2
UpperCAmelCase : Tuple = 3
self.add_from_file(A )
UpperCAmelCase : Optional[int] = {v: k for k, v in self.encoder.items()}
with open(A , encoding="""utf-8""" ) as merges_handle:
UpperCAmelCase : Union[str, Any] = merges_handle.read().split("""\n""" )[:-1]
UpperCAmelCase : int = [tuple(merge.split()[:-1] ) for merge in merges]
UpperCAmelCase : Tuple = dict(zip(A , range(len(A ) ) ) )
UpperCAmelCase : Dict = {}
def _lowercase( self , A , A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase : Any = [self.cls_token_id]
UpperCAmelCase : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowercase( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1]
def _lowercase( self , A , A = None ) -> List[int]:
UpperCAmelCase : List[Any] = [self.sep_token_id]
UpperCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _lowercase( self ) -> Any:
return len(self.encoder )
def _lowercase( self ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def bpe( self , token ):
if token in self.cache:
return self.cache[token]
word = tuple(token )
word = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
pairs = get_pairs(word )
if not pairs:
return token
while True:
bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
first , second = bigram
new_word = []
i = 0
while i < len(word ):
try:
j = word.index(first , i )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
i = j
if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
new_word = tuple(new_word )
word = new_word
if len(word ) == 1:
break
else:
pairs = get_pairs(word )
word = """@@ """.join(word )
word = word[:-4]
self.cache[token] = word
return word
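# Worked example (hedged, assumes a merge table containing ("l", "o") then ("lo", "w</w>")):
# bpe("low") rewrites "l o w</w>" -> "lo w</w>" -> "low</w>" and returns "low" once the
# trailing "</w>" marker is stripped by the word[:-4] step above.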
def _tokenize( self , text ):
split_tokens = []
words = re.findall(r"""\S+\n?""" , text )
for token in words:
split_tokens.extend(list(self.bpe(token ).split(""" """ ) ) )
return split_tokens
def _convert_token_to_id( self , token ):
return self.encoder.get(token , self.encoder.get(self.unk_token ) )
def _convert_id_to_token( self , index ):
return self.decoder.get(index , self.unk_token )
def convert_tokens_to_string( self , tokens ):
out_string = """ """.join(tokens ).replace("""@@ """ , """""" ).strip()
return out_string
def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
if not os.path.isdir(save_directory ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
out_merge_file = os.path.join(
save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
if os.path.abspath(self.merges_file ) != os.path.abspath(out_merge_file ):
copyfile(self.merges_file , out_merge_file )
return out_vocab_file, out_merge_file
def add_from_file( self , f ):
if isinstance(f , str ):
try:
with open(f , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(fd )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
return
lines = f.readlines()
for lineTmp in lines:
line = lineTmp.strip()
idx = line.rfind(""" """ )
if idx == -1:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt>'""" )
word = line[:idx]
self.encoder[word] = len(self.encoder )
| 338
|
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
a : List[str] = logging.getLogger(__name__)
class UpperCamelCase_ ( RagRetriever ):
def __init__( self , config , question_encoder_tokenizer , generator_tokenizer , index=None ):
super().__init__(
config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
self.process_group = None
def init_retrieval( self , distributed_port ):
logger.info("""initializing retrieval""" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("""dist initialized""" )
# needs to be set manually
os.environ["""GLOO_SOCKET_IFNAME"""] = self._infer_socket_ifname()
# avoid clash with the NCCL port
os.environ["""MASTER_PORT"""] = str(distributed_port + 1 )
self.process_group = dist.new_group(ranks=None , backend="""gloo""" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("""dist not initialized / main""" )
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def _is_main( self ):
return dist.get_rank(group=self.process_group ) == 0
def _scattered( self , scatter_list , target_shape , target_type=torch.float32 ):
target_tensor = torch.empty(target_shape , dtype=target_type )
dist.scatter(target_tensor , src=0 , scatter_list=scatter_list , group=self.process_group )
return target_tensor
def _infer_socket_ifname( self ):
addrs = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
ifname = next((addr for addr in addrs if addr.startswith("""e""" )) , None )
return ifname
def retrieve( self , question_hidden_states , n_docs ) -> Tuple[np.ndarray, List[dict]]:
# single GPU training
if not dist.is_initialized():
doc_ids , retrieved_doc_embeds = self._main_retrieve(question_hidden_states , n_docs )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids )
# distributed training
world_size = dist.get_world_size(group=self.process_group )
# gather logic
gather_list = None
if self._is_main():
gather_list = [torch.empty(question_hidden_states.shape , dtype=torch.float32 ) for _ in range(world_size )]
dist.gather(torch.tensor(question_hidden_states ) , dst=0 , gather_list=gather_list , group=self.process_group )
# scatter logic
n_queries = question_hidden_states.shape[0]
scatter_ids = []
scatter_vectors = []
if self._is_main():
assert len(gather_list ) == world_size
ids , vectors = self._main_retrieve(torch.cat(gather_list ).numpy() , n_docs )
ids , vectors = torch.tensor(ids ), torch.tensor(vectors )
scatter_ids = self._chunk_tensor(ids , world_size )
scatter_vectors = self._chunk_tensor(vectors , world_size )
doc_ids = self._scattered(scatter_ids , [n_queries, n_docs] , target_type=torch.int64 )
retrieved_doc_embeds = self._scattered(scatter_vectors , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids )
| 338
| 1
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_A : List[Any] =logging.get_logger(__name__)
_A : Tuple ={
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
_A : List[Any] ={
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
_A : List[Any] ={'''facebook/blenderbot-3B''': 128}
class _lowercase ( PreTrainedTokenizerFast ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["""input_ids""", """attention_mask"""]
slow_tokenizer_class = BlenderbotTokenizer
def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
super().__init__(
vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type""" ) )
pre_tok_state["""add_prefix_space"""] = add_prefix_space
self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
self.add_prefix_space = add_prefix_space
tokenizer_component = """post_processor"""
tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
if tokenizer_component_instance:
state = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
state["""sep"""] = tuple(state["""sep"""] )
if "cls" in state:
state["""cls"""] = tuple(state["""cls"""] )
changes_to_apply = False
if state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
state["""add_prefix_space"""] = add_prefix_space
changes_to_apply = True
if state.get("""trim_offsets""" , trim_offsets ) != trim_offsets:
state["""trim_offsets"""] = trim_offsets
changes_to_apply = True
if changes_to_apply:
component_class = getattr(processors , state.pop("""type""" ) )
new_value = component_class(**state )
setattr(self.backend_tokenizer , tokenizer_component , new_value )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def mask_token( self ):
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def mask_token( self , value ):
value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
self._mask_token = value
def _batch_encode_plus( self , *args , **kwargs ):
is_split_into_words = kwargs.get("""is_split_into_words""" , False )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*args , **kwargs )
def _encode_plus( self , *args , **kwargs ):
is_split_into_words = kwargs.get("""is_split_into_words""" , False )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*args , **kwargs )
def save_vocabulary( self , save_directory , filename_prefix = None ):
files = self._tokenizer.model.save(save_directory , name=filename_prefix )
return tuple(files )
def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
return token_ids_0 + [self.eos_token_id]
def _build_conversation_input_ids( self , conversation: "Conversation" ):
inputs = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(""" """ + text )
else:
# Generated responses should contain them already.
inputs.append(text )
full_string = """ """.join(inputs )
input_ids = self.encode(full_string )
if len(input_ids ) > self.model_max_length:
input_ids = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 41
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class _lowercase ( unittest.TestCase ):
def setUp( self ):
self.tmpdirname = tempfile.mkdtemp()
# fmt: off
vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""]
# fmt: on
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowerCamelCase__ : Tuple = {
"""do_resize""": True,
"""size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.5, 0.5, 0.5],
"""image_std""": [0.5, 0.5, 0.5],
}
lowerCamelCase__ : Tuple = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(image_processor_map , fp )
def lowerCamelCase_ ( self: str , **UpperCamelCase__: List[str] ):
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase_ ( self: int , **UpperCamelCase__: Tuple ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase_ ( self: Optional[Any] ):
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
lowerCamelCase__ : Tuple = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : Optional[Any] = self.get_tokenizer()
lowerCamelCase__ : Dict = self.get_image_processor()
lowerCamelCase__ : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ : int = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Dict = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ : int = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCamelCase__ : List[Any] = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 )
lowerCamelCase__ : Tuple = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : Optional[Any] = self.get_image_processor()
lowerCamelCase__ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase__ : Any = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : List[Any] = self.prepare_image_inputs()
lowerCamelCase__ : List[str] = image_processor(UpperCamelCase__ , return_tensors="""np""" )
lowerCamelCase__ : Optional[Any] = processor(images=UpperCamelCase__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Any = self.get_image_processor()
lowerCamelCase__ : List[str] = self.get_tokenizer()
lowerCamelCase__ : List[Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : List[Any] = """lower newer"""
lowerCamelCase__ : Union[str, Any] = processor(text=UpperCamelCase__ )
lowerCamelCase__ : Any = tokenizer(UpperCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Optional[Any] = self.get_image_processor()
lowerCamelCase__ : List[Any] = self.get_tokenizer()
lowerCamelCase__ : List[Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : Any = """lower newer"""
lowerCamelCase__ : Dict = self.prepare_image_inputs()
lowerCamelCase__ : Tuple = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with self.assertRaises(UpperCamelCase__ ):
processor()
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : List[str] = self.get_image_processor()
lowerCamelCase__ : List[str] = self.get_tokenizer()
lowerCamelCase__ : int = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase__ : Union[str, Any] = processor.batch_decode(UpperCamelCase__ )
lowerCamelCase__ : Dict = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Any = self.get_image_processor()
lowerCamelCase__ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase__ : int = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = """lower newer"""
lowerCamelCase__ : str = self.prepare_image_inputs()
lowerCamelCase__ : int = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
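# A checkpoint-light sketch of the processor pattern the tests above exercise:
# text is routed to the tokenizer, images to the image processor, and both
# outputs are merged into a single batch. It assumes only that `transformers`
# and `Pillow` are installed; the input values are illustrative.
#
#     import numpy as np
#     from PIL import Image
#     from transformers import BertTokenizer, ViTImageProcessor, VisionTextDualEncoderProcessor
#     processor = VisionTextDualEncoderProcessor(
#         tokenizer=BertTokenizer.from_pretrained("bert-base-uncased"),
#         image_processor=ViTImageProcessor(),
#     )
#     image = Image.fromarray(np.zeros((30, 30, 3), dtype=np.uint8))
#     batch = processor(text="lower newer", images=image, return_tensors="np")
#     # batch holds input_ids, token_type_ids, attention_mask and pixel_values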
| 41
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_a : Dict= {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Tuple= ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str= [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
from .configuration_pix2struct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
Pix2StructConfig,
Pix2StructTextConfig,
Pix2StructVisionConfig,
)
from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pix2struct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
Pix2StructForConditionalGeneration,
Pix2StructPreTrainedModel,
Pix2StructTextModel,
Pix2StructVisionModel,
)
else:
import sys
_a : int= _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
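# A short sketch of what the lazy wiring above buys: importing the package is
# cheap, and the heavy submodules listed in `_import_structure` are only loaded
# on first attribute access (illustrative; assumes the usual transformers
# re-exports).
#
#     import transformers
#     cls = transformers.Pix2StructForConditionalGeneration  # real import happens here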
| 95
|
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_a : int= False
class UpperCamelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def _lowercase (self : Optional[Any]) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self : int) -> Tuple:
__snake_case : str = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.float16)
pipe.to(_A)
pipe.set_progress_bar_config(disable=_A)
__snake_case : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
__snake_case : List[Any] = torch.manual_seed(0)
__snake_case : Optional[int] = pipe.dual_guided(
prompt='first prompt' , image=_A , text_to_image_strength=0.75 , generator=_A , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_A)
__snake_case : Optional[Any] = VersatileDiffusionPipeline.from_pretrained(_A , torch_dtype=torch.float16)
pipe.to(_A)
pipe.set_progress_bar_config(disable=_A)
__snake_case : Tuple = generator.manual_seed(0)
__snake_case : int = pipe.dual_guided(
prompt='first prompt' , image=_A , text_to_image_strength=0.75 , generator=_A , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image).sum() < 1E-5, "Models don't have the same forward pass"
def _lowercase (self : Optional[Any]) -> Optional[int]:
__snake_case : Optional[Any] = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.float16)
pipe.to(_A)
pipe.set_progress_bar_config(disable=_A)
__snake_case : Tuple = 'cyberpunk 2077'
__snake_case : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
__snake_case : str = torch.manual_seed(0)
__snake_case : Union[str, Any] = pipe.dual_guided(
prompt=_A , image=_A , text_to_image_strength=0.75 , generator=_A , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
__snake_case : Optional[Any] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__snake_case : Tuple = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
__snake_case : List[str] = 'A painting of a squirrel eating a burger '
__snake_case : str = torch.manual_seed(0)
__snake_case : str = pipe.text_to_image(
prompt=_A , generator=_A , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy').images
__snake_case : str = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__snake_case : Any = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
__snake_case : List[str] = pipe.image_variation(_A , generator=_A , output_type='numpy').images
__snake_case : Dict = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__snake_case : List[Any] = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
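# A hedged minimal sketch of the dual-guided call the tests above exercise
# (a CUDA device and fp16 weights are assumed; `init_image` stands for any
# PIL image and the prompt is illustrative):
#
#     pipe = VersatileDiffusionPipeline.from_pretrained(
#         "shi-labs/versatile-diffusion", torch_dtype=torch.float16
#     ).to("cuda")
#     out = pipe.dual_guided(
#         prompt="a red car", image=init_image, text_to_image_strength=0.75,
#         guidance_scale=7.5, num_inference_steps=50, output_type="numpy",
#     ).images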
| 95
| 1
|
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def _SCREAMING_SNAKE_CASE ( ) ->List[str]:
'''simple docstring'''
a : List[Any] = ArgumentParser(
description=(
"PyTorch TPU distributed training launch "
"helper utility that will spawn up "
"multiple distributed processes"
) )
# Optional arguments for the launch helper
parser.add_argument("--num_cores" , type=int , default=1 , help="Number of TPU cores to use (1 or 8)." )
# positional
parser.add_argument(
"training_script" , type=str , help=(
"The full path to the single TPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script"
) , )
# rest from the training program
parser.add_argument("training_script_args" , nargs=REMAINDER )
return parser.parse_args()
def _SCREAMING_SNAKE_CASE ( ) ->List[str]:
'''simple docstring'''
a : Optional[int] = parse_args()
# Import training_script as a module.
a : Dict = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
a : List[str] = script_fpath.stem
a : Optional[Any] = importlib.import_module(_lowercase )
# Patch sys.argv
a : Optional[int] = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
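# A hedged invocation sketch for this launcher. The launcher file name below is
# an assumption (upstream it ships as `xla_spawn.py` in the transformers
# examples), and the training-script arguments are illustrative; the flags
# match the parser defined above.
#
#     python xla_spawn.py --num_cores 8 \
#         run_glue.py --model_name_or_path bert-base-cased --output_dir ./out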
| 105
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class __UpperCamelCase :
@staticmethod
def __a ( *lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
pass
def _SCREAMING_SNAKE_CASE ( _lowercase : Tuple ) ->Dict:
'''simple docstring'''
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
a : Optional[Any] = (
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
lowerCamelCase : Union[str, Any] =MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
a : Tuple = pipeline(
"document-question-answering" , model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
a : Optional[int] = INVOICE_URL
a : str = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , "" ) ) )
a : Union[str, Any] = "What is the placebo?"
a : Dict = [
{
"image": load_image(lowerCAmelCase__ ),
"question": question,
},
{
"image": image,
"question": question,
},
{
"image": image,
"question": question,
"word_boxes": word_boxes,
},
]
return dqa_pipeline, examples
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
a : Tuple = dqa_pipeline(lowerCAmelCase__ , top_k=2 )
self.assertEqual(
lowerCAmelCase__ , [
[
{"score": ANY(lowerCAmelCase__ ), "answer": ANY(lowerCAmelCase__ ), "start": ANY(lowerCAmelCase__ ), "end": ANY(lowerCAmelCase__ )},
{"score": ANY(lowerCAmelCase__ ), "answer": ANY(lowerCAmelCase__ ), "start": ANY(lowerCAmelCase__ ), "end": ANY(lowerCAmelCase__ )},
]
]
* 3 , )
@require_torch
@require_detectron2
@require_pytesseract
def __a ( self ) -> List[Any]:
a : List[Any] = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" )
a : Dict = INVOICE_URL
a : List[str] = "How many cats are there?"
a : Tuple = [
{"score": 0.0_001, "answer": "oy 2312/2019", "start": 38, "end": 39},
{"score": 0.0_001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
]
a : Optional[int] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , lowerCAmelCase__ )
a : Optional[int] = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , lowerCAmelCase__ )
# No text is detected in this image, so layoutlmv2's OCR step finds nothing
# and the pipeline should return an empty answer.
a : List[Any] = "./tests/fixtures/tests_samples/COCO/000000039769.png"
a : Any = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(lowerCAmelCase__ , [] )
# We can optionally pass the words and bounding boxes directly
a : Optional[int] = "./tests/fixtures/tests_samples/COCO/000000039769.png"
a : Tuple = []
a : Optional[int] = []
a : List[str] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , words=lowerCAmelCase__ , boxes=lowerCAmelCase__ , top_k=2 )
self.assertEqual(lowerCAmelCase__ , [] )
@slow
@require_torch
@require_detectron2
@require_pytesseract
def __a ( self ) -> Tuple:
a : int = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , )
a : List[str] = INVOICE_URL
a : List[Any] = "What is the invoice number?"
a : int = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
] , )
a : str = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
] , )
a : Any = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectron2
@require_pytesseract
def __a ( self ) -> Optional[int]:
a : List[str] = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , )
a : Optional[Any] = INVOICE_URL
a : Tuple = "What is the invoice number?"
a : List[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
] , )
a : str = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
] , )
a : Tuple = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __a ( self ) -> str:
a : Optional[int] = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=lowerCAmelCase__ )
a : int = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=lowerCAmelCase__ , revision="3dc6de3" , )
a : List[Any] = INVOICE_URL
a : Tuple = "What is the invoice number?"
a : List[str] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
a : Dict = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
a : List[Any] = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
]
]
* 2 , )
a : Dict = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , "" ) ) )
# This model should also work if `image` is set to None
a : Optional[Any] = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __a ( self ) -> Tuple:
a : int = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=lowerCAmelCase__ )
a : Tuple = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=lowerCAmelCase__ , revision="3dc6de3" , max_seq_len=50 , )
a : List[str] = INVOICE_URL
a : Union[str, Any] = "What is the invoice number?"
a : List[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
] , )
a : List[str] = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
a : List[Any] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , "" ) ) )
# This model should also work if `image` is set to None
a : Any = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
] , )
@slow
@require_torch
def __a ( self ) -> int:
a : Tuple = pipeline(
"document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
a : Optional[Any] = INVOICE_URL
a : Tuple = "What is the invoice number?"
a : List[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
def __a ( self ) -> int:
pass
| 105
| 1
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
data : int
next_node : Node | None
class SortedLinkedList:
def __init__( self , UpperCAmelCase ) -> None:
self.head = None
for i in sorted(UpperCAmelCase , reverse=True ):
self.head = Node(i , self.head )
def __iter__( self ) -> Iterator[int]:
node = self.head
while node:
yield node.data
node = node.next_node
def __len__( self ) -> int:
return sum(1 for _ in self )
def __str__( self ) -> str:
return " -> ".join([str(node ) for node in self] )
def merge_lists( sll_one , sll_two ):
return SortedLinkedList(list(sll_one ) + list(sll_two ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 197
|
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class a__ ( __snake_case ):
def __init__( self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=None , **UpperCAmelCase ) -> List[Any]:
__a = parent
__a = config_class
__a = has_text_modality
__a = kwargs
__a = common_properties
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
__a = self.config_class(**self.inputs_dict )
__a = (
['hidden_size', 'num_attention_heads', 'num_hidden_layers']
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(['vocab_size'] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(UpperCAmelCase , UpperCAmelCase ) , msg=f'''`{prop}` does not exist''' )
# Test that config has the common properties as setter
for idx, name in enumerate(UpperCAmelCase ):
try:
setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
self.parent.assertEqual(
getattr(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase , msg=f'''`{name} value {idx} expected, but was {getattr(UpperCAmelCase , UpperCAmelCase )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(UpperCAmelCase ):
try:
__a = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase , msg=f'''`{name} value {idx} expected, but was {getattr(UpperCAmelCase , UpperCAmelCase )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
__a = self.config_class(**self.inputs_dict )
__a = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
__a = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__a = os.path.join(UpperCAmelCase , 'config.json' )
config_first.to_json_file(UpperCAmelCase )
__a = self.config_class.from_json_file(UpperCAmelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
__a = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(UpperCAmelCase )
__a = self.config_class.from_pretrained(UpperCAmelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
__a = self.config_class(**self.inputs_dict )
__a = 'test'
with tempfile.TemporaryDirectory() as tmpdirname:
__a = os.path.join(UpperCAmelCase , UpperCAmelCase )
config_first.save_pretrained(UpperCAmelCase )
__a = self.config_class.from_pretrained(UpperCAmelCase , subfolder=UpperCAmelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
__a = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.id2label ) , 5 )
self.parent.assertEqual(len(config.label2id ) , 5 )
__a = 3
self.parent.assertEqual(len(config.id2label ) , 3 )
self.parent.assertEqual(len(config.label2id ) , 3 )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
if self.config_class.is_composition:
return
__a = self.config_class()
self.parent.assertIsNotNone(UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
__a = copy.deepcopy(UpperCAmelCase )
__a = self.config_class(**UpperCAmelCase )
__a = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.float16:
wrong_values.append(('torch_dtype', config.torch_dtype, torch.float16) )
elif getattr(UpperCAmelCase , UpperCAmelCase ) != value:
wrong_values.append((key, getattr(UpperCAmelCase , UpperCAmelCase ), value) )
if len(UpperCAmelCase ) > 0:
__a = '\n'.join([f'''- {v[0]}: got {v[1]} instead of {v[2]}''' for v in wrong_values] )
raise ValueError(f'''The following keys were not properly set in the config:\n{errors}''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
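# A minimal sketch of how a model test suite would drive this helper, assuming
# `BertConfig` as the config class; `self` is any unittest.TestCase and the
# runner is the final method above (named `run_common_tests` upstream, which is
# an assumption since the names in this file are obfuscated).
#
#     from transformers import BertConfig
#     tester = a__(self, config_class=BertConfig, hidden_size=37)
#     tester.run_common_tests()  # exercises all the checks defined above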
| 197
| 1
|
'''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def lowerCamelCase__ ( _A ):
a : Dict = SwinConfig()
a : Optional[Any] = swin_name.split('_' )
a : Dict = name_split[1]
a : Any = int(name_split[4] )
a : List[Any] = int(name_split[3][-1] )
if model_size == "tiny":
a : Optional[Any] = 96
a : int = (2, 2, 6, 2)
a : int = (3, 6, 12, 24)
elif model_size == "small":
a : Union[str, Any] = 96
a : str = (2, 2, 18, 2)
a : Optional[int] = (3, 6, 12, 24)
elif model_size == "base":
a : Dict = 128
a : Dict = (2, 2, 18, 2)
a : Tuple = (4, 8, 16, 32)
else:
a : Optional[Any] = 192
a : List[str] = (2, 2, 18, 2)
a : Tuple = (6, 12, 24, 48)
if "in22k" in swin_name:
a : int = 2_1841
else:
a : Optional[Any] = 1000
a : Union[str, Any] = 'huggingface/label-files'
a : List[Any] = 'imagenet-1k-id2label.json'
a : Optional[Any] = json.load(open(hf_hub_download(_A , _A , repo_type='dataset' ) , 'r' ) )
a : int = {int(k ): v for k, v in id2label.items()}
a : Optional[int] = id2label
a : Any = {v: k for k, v in id2label.items()}
a : Any = img_size
a : Any = num_classes
a : int = embed_dim
a : Dict = depths
a : str = num_heads
a : Tuple = window_size
return config
def lowerCamelCase__ ( _A ):
if "patch_embed.proj" in name:
a : Union[str, Any] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
a : Union[str, Any] = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
a : Tuple = 'encoder.' + name
if "attn.proj" in name:
a : Optional[int] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
a : Union[str, Any] = name.replace('attn' , 'attention.self' )
if "norm1" in name:
a : Optional[Any] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
a : Optional[int] = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
a : Optional[int] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
a : Dict = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
a : Optional[Any] = 'layernorm.weight'
if name == "norm.bias":
a : Tuple = 'layernorm.bias'
if "head" in name:
a : str = name.replace('head' , 'classifier' )
else:
a : Tuple = 'swin.' + name
return name
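# Worked examples of the renaming above, derived directly from the branches
# rather than from a checkpoint:
#     "patch_embed.proj.weight"
#         -> "swin.embeddings.patch_embeddings.projection.weight"
#     "layers.0.blocks.0.attn.proj.weight"
#         -> "swin.encoder.layers.0.blocks.0.attention.output.dense.weight"
#     "head.weight" -> "classifier.weight"   # classification head is not prefixed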
def lowerCamelCase__ ( _A , _A ):
for key in orig_state_dict.copy().keys():
a : Union[str, Any] = orig_state_dict.pop(_A )
if "mask" in key:
continue
elif "qkv" in key:
a : List[str] = key.split('.' )
a : Union[str, Any] = int(key_split[1] )
a : Union[str, Any] = int(key_split[3] )
a : Optional[int] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
a : str = val[:dim, :]
a : Dict = val[
dim : dim * 2, :
]
a : Union[str, Any] = val[-dim:, :]
else:
a : Any = val[
:dim
]
a : Dict = val[
dim : dim * 2
]
a : Optional[int] = val[
-dim:
]
else:
a : Dict = val
return orig_state_dict
def lowerCamelCase__ ( _A , _A ):
a : List[str] = timm.create_model(_A , pretrained=_A )
timm_model.eval()
a : Tuple = get_swin_config(_A )
a : Dict = SwinForImageClassification(_A )
model.eval()
a : Any = convert_state_dict(timm_model.state_dict() , _A )
model.load_state_dict(_A )
a : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
a : Tuple = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
a : str = Image.open(requests.get(_A , stream=_A ).raw )
a : List[Any] = image_processor(images=_A , return_tensors='pt' )
a : List[Any] = timm_model(inputs['pixel_values'] )
a : Optional[int] = model(**_A ).logits
assert torch.allclose(_A , _A , atol=1E-3 )
print(f"""Saving model {swin_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_A )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_A )
if __name__ == "__main__":
lowerCAmelCase: Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swin_name',
default='swin_tiny_patch4_window7_224',
type=str,
help='Name of the Swin timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowerCAmelCase: Dict = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
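# A hedged invocation sketch; the script file name below is an assumption, but
# the flags match the argparse definition above:
#
#     python convert_swin_timm_to_pytorch.py \
#         --swin_name swin_tiny_patch4_window7_224 \
#         --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224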
| 297
|
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCamelCase__ ( _A , _A ):
assert isinstance(_A , _A )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def lowerCamelCase__ ( _A , _A , _A ):
a : str = tmp_path / 'cache'
a : Optional[int] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
a : List[Any] = JsonDatasetReader(_A , cache_dir=_A , keep_in_memory=_A ).read()
_check_json_dataset(_A , _A )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def lowerCamelCase__ ( _A , _A , _A ):
a : str = tmp_path / 'cache'
a : Tuple = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a : Dict = features.copy() if features else default_expected_features
a : Union[str, Any] = (
Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
)
a : Any = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read()
_check_json_dataset(_A , _A )
@pytest.mark.parametrize(
'features' , [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
] , )
def lowerCamelCase__ ( _A , _A , _A ):
a : Tuple = tmp_path / 'cache'
a : Optional[Any] = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
a : Optional[int] = features.copy() if features else default_expected_features
a : Dict = (
Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
)
a : Optional[int] = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read()
assert isinstance(_A , _A )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowerCamelCase__ ( _A , _A ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
a : Dict = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
a : int = features.copy()
a : List[Any] = (
Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
)
a : Dict = tmp_path / 'cache'
a : Any = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read()
assert isinstance(_A , _A )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def lowerCamelCase__ ( _A , _A , _A ):
a : Dict = tmp_path / 'cache'
a : List[str] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a : List[Any] = JsonDatasetReader(_A , cache_dir=_A , split=_A ).read()
_check_json_dataset(_A , _A )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def lowerCamelCase__ ( _A , _A , _A ):
if issubclass(_A , _A ):
a : Optional[int] = jsonl_path
elif issubclass(_A , _A ):
a : Optional[int] = [jsonl_path]
a : List[str] = tmp_path / 'cache'
a : Dict = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a : Tuple = JsonDatasetReader(_A , cache_dir=_A ).read()
_check_json_dataset(_A , _A )
def lowerCamelCase__ ( _A , _A , _A=("train",) ):
assert isinstance(_A , _A )
for split in splits:
a : str = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def lowerCamelCase__ ( _A , _A , _A ):
a : Dict = tmp_path / 'cache'
a : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
a : int = JsonDatasetReader({'train': jsonl_path} , cache_dir=_A , keep_in_memory=_A ).read()
_check_json_datasetdict(_A , _A )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def lowerCamelCase__ ( _A , _A , _A ):
a : Dict = tmp_path / 'cache'
a : List[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a : List[Any] = features.copy() if features else default_expected_features
a : Any = (
Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
)
a : List[str] = JsonDatasetReader({'train': jsonl_path} , features=_A , cache_dir=_A ).read()
_check_json_datasetdict(_A , _A )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def lowerCamelCase__ ( _A , _A , _A ):
if split:
a : Any = {split: jsonl_path}
else:
a : List[Any] = 'train'
a : List[str] = {'train': jsonl_path, 'test': jsonl_path}
a : List[Any] = tmp_path / 'cache'
a : str = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a : Tuple = JsonDatasetReader(_A , cache_dir=_A ).read()
_check_json_datasetdict(_A , _A , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCamelCase__ ( _A ):
return json.load(_A )
def lowerCamelCase__ ( _A ):
return [json.loads(line ) for line in _A ]
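# A tiny worked example of the two readers above: the first parses one whole
# JSON document from a buffer, the second one JSON object per line (jsonl).
#
#     import io, json
#     assert json.load(io.BytesIO(b'{"a": 1}')) == {"a": 1}
#     lines = io.BytesIO(b'{"a": 1}\n{"a": 2}\n')
#     assert [json.loads(line) for line in lines] == [{"a": 1}, {"a": 2}]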
class a__:
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def lowercase_ ( self : Tuple , __snake_case : int , __snake_case : Optional[int] , __snake_case : Any ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case ).write()
buffer.seek(0 )
a : List[str] = load_json_function(__snake_case )
assert isinstance(__snake_case , __snake_case )
assert isinstance(exported_content[0] , __snake_case )
assert len(__snake_case ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def lowercase_ ( self : Tuple , __snake_case : Tuple , __snake_case : Any , __snake_case : Any , __snake_case : List[str] , __snake_case : Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , orient=__snake_case ).write()
buffer.seek(0 )
a : int = load_json(__snake_case )
assert isinstance(__snake_case , __snake_case )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__snake_case , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__snake_case ) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def lowercase_ ( self : List[Any] , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , num_proc=2 ).write()
buffer.seek(0 )
a : List[Any] = load_json_function(__snake_case )
assert isinstance(__snake_case , __snake_case )
assert isinstance(exported_content[0] , __snake_case )
assert len(__snake_case ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def lowercase_ ( self : Optional[int] , __snake_case : Any , __snake_case : str , __snake_case : int , __snake_case : List[Any] , __snake_case : Dict ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , orient=__snake_case , num_proc=2 ).write()
buffer.seek(0 )
a : int = load_json(__snake_case )
assert isinstance(__snake_case , __snake_case )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__snake_case , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__snake_case ) == 10
def lowercase_ ( self : List[str] , __snake_case : str ):
with pytest.raises(__snake_case ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , num_proc=0 )
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
def lowercase_ ( self : Tuple , __snake_case : Dict , __snake_case : List[Any] , __snake_case : int , __snake_case : List[str] , __snake_case : Optional[int] ):
a : Tuple = tmp_path_factory.mktemp('data' ) / F"""test.json.{extension}"""
a : List[Any] = str(shared_datadir / F"""test_file.json.{extension}""" )
JsonDatasetWriter(__snake_case , __snake_case , compression=__snake_case ).write()
with fsspec.open(__snake_case , 'rb' , compression='infer' ) as f:
a : Union[str, Any] = f.read()
with fsspec.open(__snake_case , 'rb' , compression='infer' ) as f:
a : Union[str, Any] = f.read()
assert exported_content == original_content
| 297
| 1
|
def dodecahedron_surface_area( edge ):
if edge <= 0 or not isinstance(edge , (int, float) ):
raise ValueError("Length must be a positive number." )
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume( edge ):
if edge <= 0 or not isinstance(edge , (int, float) ):
raise ValueError("Length must be a positive number." )
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
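# A quick numeric check of the two closed forms above (edge = 2; values are
# computed from the formulas themselves, rounded for display):
#
#     print(round(dodecahedron_surface_area(2), 2))  # ~82.58
#     print(round(dodecahedron_volume(2), 2))        # ~61.3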
| 370
|
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
lowerCAmelCase : Union[str, Any] = get_tests_dir("""fixtures/dummy-config.json""")
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Dict = 0
def _SCREAMING_SNAKE_CASE ( self : Any):
self.assertIsNotNone(transformers.models.auto.__spec__)
self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: List[Any] = AutoConfig.from_pretrained("bert-base-uncased")
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: Optional[Any] = AutoConfig.from_pretrained(lowerCAmelCase__)
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: Tuple = AutoConfig.from_pretrained(lowerCAmelCase__)
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: List[Any] = AutoConfig.for_model("roberta")
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
SCREAMING_SNAKE_CASE_: int = os.path.join(lowerCAmelCase__ , "fake-roberta")
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__)
with open(os.path.join(lowerCAmelCase__ , "config.json") , "w") as f:
f.write(json.dumps({}))
SCREAMING_SNAKE_CASE_: Any = AutoConfig.from_pretrained(lowerCAmelCase__)
self.assertEqual(type(lowerCAmelCase__) , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
try:
AutoConfig.register("custom" , lowerCAmelCase__)
# Wrong model type will raise an error
with self.assertRaises(lowerCAmelCase__):
AutoConfig.register("model" , lowerCAmelCase__)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCAmelCase__):
AutoConfig.register("bert" , lowerCAmelCase__)
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE_: List[Any] = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = AutoConfig.from_pretrained(lowerCAmelCase__)
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def _SCREAMING_SNAKE_CASE ( self : List[str]):
with self.assertRaisesRegex(
lowerCAmelCase__ , "bert-base is not a local folder and is not a valid model identifier"):
SCREAMING_SNAKE_CASE_: List[str] = AutoConfig.from_pretrained("bert-base")
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
with self.assertRaisesRegex(
lowerCAmelCase__ , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"):
SCREAMING_SNAKE_CASE_: str = AutoConfig.from_pretrained(lowerCAmelCase__ , revision="aaaaaa")
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
with self.assertRaisesRegex(
lowerCAmelCase__ , "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." , ):
SCREAMING_SNAKE_CASE_: int = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")
def _SCREAMING_SNAKE_CASE ( self : List[str]):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: str = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: Optional[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=lowerCAmelCase__)
self.assertEqual(config.__class__.__name__ , "NewModelConfig")
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = AutoConfig.from_pretrained(lowerCAmelCase__ , trust_remote_code=lowerCAmelCase__)
self.assertEqual(reloaded_config.__class__.__name__ , "NewModelConfig")
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : int = '''new-model'''
try:
AutoConfig.register("new-model" , lowerCAmelCase__)
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE_: int = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal")
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE_: Optional[int] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=lowerCAmelCase__)
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal")
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE_: int = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=lowerCAmelCase__)
self.assertEqual(config.__class__.__name__ , "NewModelConfig")
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 127
| 0
|
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase__ : int = logging.get_logger(__name__)
lowercase__ : Any = {
'''vocab_file''': '''vocab.txt''',
'''merges_file''': '''bpe.codes''',
}
lowercase__ : str = {
'''vocab_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
},
'''merges_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
},
}
lowercase__ : Tuple = {
'''vinai/phobert-base''': 2_5_6,
'''vinai/phobert-large''': 2_5_6,
}
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Dict:
lowerCAmelCase = set()
lowerCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCAmelCase = char
lowerCAmelCase = set(pairs )
return pairs
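# A worked example of the helper above (named `get_pairs` upstream, which is
# how the BPE code below calls it): the adjacent symbol bigrams of a word.
#
#     get_pairs(("l", "o", "w"))  # -> {("l", "o"), ("o", "w")}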
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : Any = VOCAB_FILES_NAMES
UpperCAmelCase_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , **__SCREAMING_SNAKE_CASE , ) ->Tuple:
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
lowerCAmelCase = vocab_file
lowerCAmelCase = merges_file
lowerCAmelCase = {}
lowerCAmelCase = 0
lowerCAmelCase = 1
lowerCAmelCase = 2
lowerCAmelCase = 3
self.add_from_file(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = {v: k for k, v in self.encoder.items()}
with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as merges_handle:
lowerCAmelCase = merges_handle.read().split('''\n''' )[:-1]
lowerCAmelCase = [tuple(merge.split()[:-1] ) for merge in merges]
lowerCAmelCase = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
lowerCAmelCase = {}
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->List[int]:
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
lowerCAmelCase = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
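# A worked layout sketch for the method above, using symbolic ids with
# cls = [0] and sep = [2]:
#     single sequence:   <s> A </s>            -> [0] + token_ids_0 + [2]
#     pair of sequences: <s> A </s></s> B </s> -> [0] + A + [2, 2] + B + [2]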
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ) ->List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=__SCREAMING_SNAKE_CASE , token_ids_1=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0 )) + [1]
return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
return len(self.encoder )
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->List[Any]:
if token in self.cache:
return self.cache[token]
lowerCAmelCase = tuple(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
lowerCAmelCase = get_pairs(__SCREAMING_SNAKE_CASE )
if not pairs:
return token
while True:
lowerCAmelCase = min(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : self.bpe_ranks.get(__SCREAMING_SNAKE_CASE , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase , lowerCAmelCase = bigram
lowerCAmelCase = []
lowerCAmelCase = 0
while i < len(__SCREAMING_SNAKE_CASE ):
try:
lowerCAmelCase = word.index(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase = j
if word[i] == first and i < len(__SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase = tuple(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = new_word
if len(__SCREAMING_SNAKE_CASE ) == 1:
break
else:
lowerCAmelCase = get_pairs(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = '''@@ '''.join(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = word[:-4]
lowerCAmelCase = word
return word
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Tuple:
lowerCAmelCase = []
lowerCAmelCase = re.findall(R'''\S+\n?''' , __SCREAMING_SNAKE_CASE )
for token in words:
split_tokens.extend(list(self.bpe(__SCREAMING_SNAKE_CASE ).split(''' ''' ) ) )
return split_tokens
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Dict:
return self.encoder.get(__SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Tuple:
return self.decoder.get(__SCREAMING_SNAKE_CASE , self.unk_token )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->List[Any]:
lowerCAmelCase = ''' '''.join(__SCREAMING_SNAKE_CASE ).replace('''@@ ''' , '''''' ).strip()
return out_string
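# A worked example of the detokenization above: "@@ " marks a word-internal
# BPE split, so joining the pieces and stripping the marker restores the
# surface form.
#
#     " ".join(["un@@", "der@@", "stand"]).replace("@@ ", "").strip()  # -> "understand"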
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->Tuple[str]:
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCAmelCase = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
if os.path.abspath(self.merges_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ):
copyfile(self.merges_file , __SCREAMING_SNAKE_CASE )
return out_vocab_file, out_merge_file
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Optional[int]:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
try:
with open(__SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' ) as fd:
self.add_from_file(__SCREAMING_SNAKE_CASE )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(F"Incorrect encoding detected in {f}, please rebuild the dataset" )
return
lowerCAmelCase = f.readlines()
for lineTmp in lines:
lowerCAmelCase = lineTmp.strip()
lowerCAmelCase = line.rfind(''' ''' )
if idx == -1:
raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' )
lowerCAmelCase = line[:idx]
lowerCAmelCase = len(self.encoder )
| 338
|
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
lowercase__ : str = logging.get_logger(__name__)
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : Any = """AutoTokenizer"""
UpperCAmelCase_ : Optional[int] = ["""tokenizer"""]
UpperCAmelCase_ : str = {
"""semantic_prompt""": 1,
"""coarse_prompt""": 2,
"""fine_prompt""": 2,
}
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Optional[Any]:
super().__init__(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = speaker_embeddings
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , **__SCREAMING_SNAKE_CASE ) ->Tuple:
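        # Resolve the speaker-embeddings JSON from the repo (or local path); fall back to None with a warning if it cannot be found.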
if speaker_embeddings_dict_path is not None:
lowerCAmelCase = get_file_from_repo(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , subfolder=kwargs.pop('''subfolder''' , __SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop('''cache_dir''' , __SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop('''force_download''' , __SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop('''proxies''' , __SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop('''resume_download''' , __SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop('''local_files_only''' , __SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop('''use_auth_token''' , __SCREAMING_SNAKE_CASE ) , revision=kwargs.pop('''revision''' , __SCREAMING_SNAKE_CASE ) , )
if speaker_embeddings_path is None:
logger.warning(
F"`{os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`." )
lowerCAmelCase = None
else:
with open(__SCREAMING_SNAKE_CASE ) as speaker_embeddings_json:
lowerCAmelCase = json.load(__SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase = None
lowerCAmelCase = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
return cls(tokenizer=__SCREAMING_SNAKE_CASE , speaker_embeddings=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , __SCREAMING_SNAKE_CASE="speaker_embeddings" , __SCREAMING_SNAKE_CASE = False , **__SCREAMING_SNAKE_CASE , ) ->int:
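        # Save each voice preset's prompt arrays as .npy files and record their relative paths in a JSON index alongside the tokenizer files.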
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''v2''' ) , exist_ok=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = {}
lowerCAmelCase = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
lowerCAmelCase = self._load_voice_preset(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['''repo_or_path'''] , __SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=__SCREAMING_SNAKE_CASE , )
lowerCAmelCase = os.path.join(__SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}.npy" )
lowerCAmelCase = tmp_dict
with open(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , '''w''' ) as fp:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
super().save_pretrained(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE ) ->List[str]:
lowerCAmelCase = self.speaker_embeddings[voice_preset]
lowerCAmelCase = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." )
lowerCAmelCase = get_file_from_repo(
self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , __SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop('''cache_dir''' , __SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop('''force_download''' , __SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop('''proxies''' , __SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop('''resume_download''' , __SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop('''local_files_only''' , __SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop('''use_auth_token''' , __SCREAMING_SNAKE_CASE ) , revision=kwargs.pop('''revision''' , __SCREAMING_SNAKE_CASE ) , )
if path is None:
raise ValueError(
F"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings." )
lowerCAmelCase = np.load(__SCREAMING_SNAKE_CASE )
return voice_preset_dict
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE = None ) ->Tuple:
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F"Voice preset unrecognized, missing {key} as a key." )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="pt" , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ) ->int:
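        # A voice preset may be a known preset name, a path to an .npz file, or an already-loaded dict of prompt arrays.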
if voice_preset is not None and not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
if (
isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
lowerCAmelCase = self._load_voice_preset(__SCREAMING_SNAKE_CASE )
else:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not voice_preset.endswith('''.npz''' ):
lowerCAmelCase = voice_preset + '''.npz'''
lowerCAmelCase = np.load(__SCREAMING_SNAKE_CASE )
if voice_preset is not None:
self._validate_voice_preset_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowerCAmelCase = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.tokenizer(
__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
if voice_preset is not None:
lowerCAmelCase = voice_preset
return encoded_text
| 338
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowercase__ : Union[str, Any] = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[Any] = ['OwlViTFeatureExtractor']
lowercase__ : List[str] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Union[str, Any] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
lowercase__ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 287
|
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = ['''a''', '''b''', '''c''']
# Defaults to last layer if both are None
_UpperCamelCase , _UpperCamelCase = get_aligned_output_features_output_indices(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , ['''c'''] )
self.assertEqual(lowerCAmelCase__ , [2] )
# Out indices set to match out features
_UpperCamelCase , _UpperCamelCase = get_aligned_output_features_output_indices(['''a''', '''c'''] , lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , ['''a''', '''c'''] )
self.assertEqual(lowerCAmelCase__ , [0, 2] )
# Out features set to match out indices
_UpperCamelCase , _UpperCamelCase = get_aligned_output_features_output_indices(lowerCAmelCase__ , [0, 2] , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , ['''a''', '''c'''] )
self.assertEqual(lowerCAmelCase__ , [0, 2] )
# Out features selected from negative indices
_UpperCamelCase , _UpperCamelCase = get_aligned_output_features_output_indices(lowerCAmelCase__ , [-3, -1] , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , ['''a''', '''c'''] )
self.assertEqual(lowerCAmelCase__ , [-3, -1] )
def snake_case__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase__ ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , lowerCAmelCase__ )
# Out features must be a list
with self.assertRaises(lowerCAmelCase__ ):
verify_out_features_out_indices(('''a''', '''b''') , (0, 1) , ['''a''', '''b'''] )
# Out features must be a subset of stage names
with self.assertRaises(lowerCAmelCase__ ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , ['''a'''] )
# Out indices must be a list or tuple
with self.assertRaises(lowerCAmelCase__ ):
verify_out_features_out_indices(lowerCAmelCase__ , 0 , ['''a''', '''b'''] )
# Out indices must be a subset of stage names
with self.assertRaises(lowerCAmelCase__ ):
verify_out_features_out_indices(lowerCAmelCase__ , (0, 1) , ['''a'''] )
# Out features and out indices must be the same length
with self.assertRaises(lowerCAmelCase__ ):
verify_out_features_out_indices(['''a''', '''b'''] , (0,) , ['''a''', '''b''', '''c'''] )
# Out features should match out indices
with self.assertRaises(lowerCAmelCase__ ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 2) , ['''a''', '''b''', '''c'''] )
# Out features and out indices should be in order
with self.assertRaises(lowerCAmelCase__ ):
verify_out_features_out_indices(['''b''', '''a'''] , (0, 1) , ['''a''', '''b'''] )
# Check passes with valid inputs
verify_out_features_out_indices(['''a''', '''b''', '''d'''] , (0, 1, -1) , ['''a''', '''b''', '''c''', '''d'''] )
def snake_case__ ( self : Any ) -> str:
'''simple docstring'''
_UpperCamelCase = BackboneMixin()
_UpperCamelCase = ['''a''', '''b''', '''c''']
_UpperCamelCase = ['''a''', '''c''']
_UpperCamelCase = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
_UpperCamelCase = ['''a''', '''b''']
self.assertEqual(backbone.out_features , ['''a''', '''b'''] )
self.assertEqual(backbone.out_indices , [0, 1] )
_UpperCamelCase = [-3, -1]
self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
self.assertEqual(backbone.out_indices , [-3, -1] )
| 287
| 1
|
from string import ascii_uppercase
UpperCAmelCase : int = {char: i for i, char in enumerate(ascii_uppercase)}
UpperCAmelCase : Optional[Any] = dict(enumerate(ascii_uppercase))
def _A ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
a__ : List[str] =len(SCREAMING_SNAKE_CASE )
a__ : str =0
while True:
if x == i:
a__ : Union[str, Any] =0
if len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE ):
break
key += key[i]
i += 1
return key
def _A ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
a__ : Tuple =""
a__ : Tuple =0
for letter in message:
if letter == " ":
cipher_text += " "
else:
a__ : str =(dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def _A ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
a__ : Any =""
a__ : Dict =0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
a__ : Tuple =(dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def _A ( ):
"""simple docstring"""
a__ : List[str] ="THE GERMAN ATTACK"
a__ : List[str] ="SECRET"
a__ : Tuple =generate_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
a__ : List[str] =cipher_text(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
print(f'''Encrypted Text = {s}''' )
print(f'''Original Text = {original_text(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 95
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Dict = logging.get_logger(__name__)
UpperCAmelCase : Tuple = {
"""caidas/swin2sr-classicalsr-x2-64""": (
"""https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"""
),
}
class __lowerCAmelCase ( UpperCamelCase__):
_lowercase : Any = """swin2sr"""
_lowercase : Tuple = {
"""hidden_size""": """embed_dim""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , lowerCAmelCase__=6_4 , lowerCAmelCase__=1 , lowerCAmelCase__=3 , lowerCAmelCase__=1_8_0 , lowerCAmelCase__=[6, 6, 6, 6, 6, 6] , lowerCAmelCase__=[6, 6, 6, 6, 6, 6] , lowerCAmelCase__=8 , lowerCAmelCase__=2.0 , lowerCAmelCase__=True , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.1 , lowerCAmelCase__="gelu" , lowerCAmelCase__=False , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-5 , lowerCAmelCase__=2 , lowerCAmelCase__=1.0 , lowerCAmelCase__="1conv" , lowerCAmelCase__="pixelshuffle" , **lowerCAmelCase__ , ) -> int:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
a__ : Optional[Any] =image_size
a__ : Dict =patch_size
a__ : Tuple =num_channels
a__ : Union[str, Any] =embed_dim
a__ : Optional[Any] =depths
a__ : List[str] =len(lowerCAmelCase__ )
a__ : Any =num_heads
a__ : Any =window_size
a__ : str =mlp_ratio
a__ : List[str] =qkv_bias
a__ : Dict =hidden_dropout_prob
a__ : List[str] =attention_probs_dropout_prob
a__ : Dict =drop_path_rate
a__ : Optional[Any] =hidden_act
a__ : Union[str, Any] =use_absolute_embeddings
a__ : Optional[Any] =layer_norm_eps
a__ : List[Any] =initializer_range
a__ : int =upscale
a__ : Optional[int] =img_range
a__ : Any =resi_connection
a__ : Optional[Any] =upsampler
| 95
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
A_ = "facebook/bart-large-mnli"
A_ = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
A_ = "text_classifier"
A_ = AutoTokenizer
A_ = AutoModelForSequenceClassification
A_ = ["text", ["text"]]
A_ = ["text"]
def __A ( self: int ) -> str:
super().setup()
_A = self.model.config
_A = -1
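        # Scan the config's id-to-label mapping for the label whose name starts with "entail".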
for idx, label in config.idalabel.items():
if label.lower().startswith('''entail''' ):
_A = int(__A )
if self.entailment_id == -1:
raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' )
def __A ( self: Union[str, Any] , __A: Union[str, Any] , __A: List[str] ) -> int:
_A = labels
return self.pre_processor(
[text] * len(__A ) , [f"""This example is {label}""" for label in labels] , return_tensors='''pt''' , padding='''max_length''' , )
def __A ( self: str , __A: List[Any] ) -> Union[str, Any]:
_A = outputs.logits
_A = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
| 75
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__A = R'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(snake_case )
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
A_ = "rag"
A_ = True
def __init__( self: Optional[int] , __A: Optional[int]=None , __A: Dict=True , __A: Any=None , __A: Dict=None , __A: Optional[int]=None , __A: Optional[Any]=None , __A: Optional[Any]=None , __A: Optional[Any]=" / " , __A: int=" // " , __A: List[Any]=5 , __A: Dict=3_00 , __A: int=7_68 , __A: Tuple=8 , __A: List[Any]="wiki_dpr" , __A: List[str]="train" , __A: Optional[Any]="compressed" , __A: Optional[int]=None , __A: Union[str, Any]=None , __A: Dict=False , __A: Tuple=False , __A: Optional[int]=0.0 , __A: Optional[int]=True , __A: int=False , __A: int=False , __A: Optional[Any]=False , __A: Optional[int]=True , __A: Optional[int]=None , **__A: Optional[Any] , ) -> Union[str, Any]:
super().__init__(
bos_token_id=__A , pad_token_id=__A , eos_token_id=__A , decoder_start_token_id=__A , forced_eos_token_id=__A , is_encoder_decoder=__A , prefix=__A , vocab_size=__A , **__A , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_A = kwargs.pop('''question_encoder''' )
_A = question_encoder_config.pop('''model_type''' )
_A = kwargs.pop('''generator''' )
_A = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
_A = AutoConfig.for_model(__A , **__A )
_A = AutoConfig.for_model(__A , **__A )
_A = reduce_loss
_A = label_smoothing
_A = exclude_bos_score
_A = do_marginalize
_A = title_sep
_A = doc_sep
_A = n_docs
_A = max_combined_length
_A = dataset
_A = dataset_split
_A = index_name
_A = retrieval_vector_size
_A = retrieval_batch_size
_A = passages_path
_A = index_path
_A = use_dummy_dataset
_A = output_retrieved
_A = do_deduplication
_A = use_cache
if self.forced_eos_token_id is None:
_A = getattr(self.generator , '''forced_eos_token_id''' , __A )
@classmethod
def __A ( cls: List[Any] , __A: PretrainedConfig , __A: PretrainedConfig , **__A: Optional[int] ) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__A )
def __A ( self: Optional[Any] ) -> Dict:
_A = copy.deepcopy(self.__dict__ )
_A = self.question_encoder.to_dict()
_A = self.generator.to_dict()
_A = self.__class__.model_type
return output
| 75
| 1
|
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
__lowerCAmelCase : str =logging.get_logger("""transformers.models.speecht5""")
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :int ) -> List[str]:
'''simple docstring'''
hf_model.apply_weight_norm()
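    # Weight norm must be applied first so the checkpoint's weight_g/weight_v tensors line up with the HF module's parameters.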
lowercase = checkpoint["""input_conv.weight_g"""]
lowercase = checkpoint["""input_conv.weight_v"""]
lowercase = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
lowercase = checkpoint[f'upsamples.{i}.1.weight_g']
lowercase = checkpoint[f'upsamples.{i}.1.weight_v']
lowercase = checkpoint[f'upsamples.{i}.1.bias']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
lowercase = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_g']
lowercase = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_v']
lowercase = checkpoint[f'blocks.{i}.convs1.{j}.1.bias']
lowercase = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_g']
lowercase = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_v']
lowercase = checkpoint[f'blocks.{i}.convs2.{j}.1.bias']
lowercase = checkpoint["""output_conv.1.weight_g"""]
lowercase = checkpoint["""output_conv.1.weight_v"""]
lowercase = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def UpperCAmelCase__ ( lowerCAmelCase__ :Any , lowerCAmelCase__ :Dict , lowerCAmelCase__ :int , lowerCAmelCase__ :str=None , lowerCAmelCase__ :Dict=None , ) -> Union[str, Any]:
'''simple docstring'''
if config_path is not None:
lowercase = SpeechTaHifiGanConfig.from_pretrained(lowerCAmelCase__ )
else:
lowercase = SpeechTaHifiGanConfig()
lowercase = SpeechTaHifiGan(lowerCAmelCase__ )
lowercase = torch.load(lowerCAmelCase__ )
load_weights(orig_checkpoint["""model"""]["""generator"""] , lowerCAmelCase__ , lowerCAmelCase__ )
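    # The stats file is assumed to hold a [mean, scale] pair that the model uses for input normalization.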
lowercase = np.load(lowerCAmelCase__ )
lowercase = stats[0].reshape(-1 )
lowercase = stats[1].reshape(-1 )
lowercase = torch.from_numpy(lowerCAmelCase__ ).float()
lowercase = torch.from_numpy(lowerCAmelCase__ ).float()
model.save_pretrained(lowerCAmelCase__ )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(lowerCAmelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : List[Any] =argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
__lowerCAmelCase : Optional[Any] =parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 197
|
"""simple docstring"""
from __future__ import annotations
from scipy.special import comb # type: ignore
class _A :
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
lowercase = len(__lowerCAmelCase ) - 1
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
lowercase = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
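            # (the Bernstein polynomial: comb(n, i) * (1 - t)**(n - i) * t**i)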
output_values.append(
comb(self.degree , __lowerCAmelCase ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(__lowerCAmelCase ) , 5 ) == 1
return output_values
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
lowercase = self.basis_function(__lowerCAmelCase )
lowercase = 0.0
lowercase = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def A__ ( self , __lowerCAmelCase = 0.0_1 ):
"""simple docstring"""
from matplotlib import pyplot as plt # type: ignore
lowercase = [] # x coordinates of points to plot
lowercase = [] # y coordinates of points to plot
lowercase = 0.0
while t <= 1:
lowercase = self.bezier_curve_function(__lowerCAmelCase )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
lowercase = [i[0] for i in self.list_of_points]
lowercase = [i[1] for i in self.list_of_points]
plt.plot(
__lowerCAmelCase , __lowerCAmelCase , color="""blue""" , label="""Curve of Degree """ + str(self.degree ) , )
plt.scatter(__lowerCAmelCase , __lowerCAmelCase , color="""red""" , label="""Control Points""" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 197
| 1
|
"""simple docstring"""
from __future__ import annotations
lowercase__ : int = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def __lowercase ( _a , _a , _a , _a , _a , ):
snake_case_ : List[Any] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(a__ ) )
] # the reference grid
snake_case_ : List[Any] = 1
snake_case_ : Tuple = [
[0 for col in range(len(grid[0] ) )] for row in range(len(a__ ) )
] # the action grid
snake_case_ : str = init[0]
snake_case_ : Any = init[1]
snake_case_ : Tuple = 0
    snake_case_ : List[str] = g + heuristic[x][y] # f = g + h: path cost so far plus the heuristic estimate to the goal
snake_case_ : int = [[f, g, x, y]]
snake_case_ : List[str] = False # flag that is set when search is complete
snake_case_ : Any = False # flag set if we can't find expand
while not found and not resign:
if len(a__ ) == 0:
            raise ValueError('''Algorithm is unable to find a solution''' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
snake_case_ : Tuple = cell.pop()
snake_case_ : int = next_cell[2]
snake_case_ : int = next_cell[3]
snake_case_ : str = next_cell[1]
if x == goal[0] and y == goal[1]:
snake_case_ : int = True
else:
for i in range(len(a__ ) ): # to try out different valid actions
snake_case_ : Union[str, Any] = x + DIRECTIONS[i][0]
snake_case_ : Union[str, Any] = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(a__ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
snake_case_ : Tuple = g + cost
snake_case_ : List[str] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
snake_case_ : Union[str, Any] = 1
snake_case_ : Any = i
snake_case_ : Dict = []
snake_case_ : Any = goal[0]
snake_case_ : int = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
snake_case_ : Optional[int] = x - DIRECTIONS[action[x][y]][0]
snake_case_ : Optional[int] = y - DIRECTIONS[action[x][y]][1]
snake_case_ : Any = xa
snake_case_ : str = ya
invpath.append([x, y] )
snake_case_ : Optional[int] = []
for i in range(len(a__ ) ):
path.append(invpath[len(a__ ) - 1 - i] )
return path, action
if __name__ == "__main__":
lowercase__ : Optional[Any] = [
[0, 1, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
lowercase__ : Optional[Any] = [0, 0]
# all coordinates are given in format [y,x]
lowercase__ : Optional[int] = [len(grid) - 1, len(grid[0]) - 1]
lowercase__ : Optional[int] = 1
# the cost map which pushes the path closer to the goal
lowercase__ : Tuple = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
lowercase__ : Any = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
lowercase__ : Optional[int] = 99
lowercase__ ,lowercase__ : Optional[int] = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 360
|
"""simple docstring"""
import math
import sys
def __lowercase ( _a ):
if number != int(_a ):
        raise ValueError('''the value of the input must be a natural number''' )
    if number < 0:
        raise ValueError('''the value of the input must not be a negative number''' )
if number == 0:
return 1
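    # answers[i] caches the minimum number of perfect squares summing to i (at most 4, by Lagrange's four-square theorem).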
snake_case_ : int = [-1] * (number + 1)
snake_case_ : int = 0
for i in range(1 , number + 1 ):
snake_case_ : Tuple = sys.maxsize
snake_case_ : List[Any] = int(math.sqrt(_a ) )
for j in range(1 , root + 1 ):
snake_case_ : Dict = 1 + answers[i - (j**2)]
snake_case_ : int = min(_a , _a )
snake_case_ : Any = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 155
| 0
|
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : int = 50 ) ->int:
'''simple docstring'''
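    # Bottom-up count of ways to tile a row with blocks of length >= 3 separated by gaps (this appears to be Project Euler problem 114).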
a : Optional[Any] = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 105
|
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
_SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class A__ ( snake_case__ ):
"""simple docstring"""
def __init__( self , *__snake_case , __snake_case=None , __snake_case=None , __snake_case=None , **__snake_case ):
super().__init__(*__snake_case , **__snake_case )
snake_case = eval_examples
snake_case = post_process_function
snake_case = quant_trainer_args
snake_case = 1_2_8 # default number of calibration samples
def a_ ( self , __snake_case=None ):
if calib_dataset is None and self.calib_dataset is None:
            raise ValueError('''Trainer: calibration requires a calib_dataset.''' )
snake_case = calib_dataset if calib_dataset is not None else self.calib_dataset
snake_case = self._remove_unused_columns(__snake_case , description='''Calibration''' )
return DataLoader(
__snake_case , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__snake_case , )
def a_ ( self , __snake_case=None ):
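        # Run forward passes over the calibration set so the quantizers can collect activation statistics before quantization is finalized.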
snake_case = self.train_dataset if calib_dataset is None else calib_dataset
snake_case = self.get_calib_dataloader(__snake_case )
snake_case = self.model
quant_trainer.configure_model(__snake_case , self.quant_trainer_args , calib=__snake_case )
model.eval()
quant_trainer.enable_calibration(__snake_case )
logger.info('''***** Running calibration *****''' )
logger.info(F''' Num examples = {self.calib_num}''' )
logger.info(F''' Batch size = {calib_dataloader.batch_size}''' )
for step, inputs in enumerate(__snake_case ):
# Prediction step
snake_case , snake_case , snake_case = self.prediction_step(__snake_case , __snake_case , prediction_loss_only=__snake_case )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(__snake_case , self.quant_trainer_args )
snake_case = model
def a_ ( self , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case = "eval" ):
snake_case = self.eval_dataset if eval_dataset is None else eval_dataset
snake_case = self.get_eval_dataloader(__snake_case )
snake_case = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
snake_case = self.compute_metrics
snake_case = None
snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
snake_case = eval_loop(
__snake_case , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__snake_case , )
finally:
snake_case = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
snake_case = self.post_process_function(__snake_case , __snake_case , output.predictions )
snake_case = self.compute_metrics(__snake_case )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
snake_case = metrics.pop(__snake_case )
self.log(__snake_case )
else:
snake_case = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
snake_case = self.callback_handler.on_evaluate(self.args , self.state , self.control , __snake_case )
return metrics
def a_ ( self , __snake_case , __snake_case , __snake_case=None , __snake_case = "test" ):
snake_case = self.get_test_dataloader(__snake_case )
# Temporarily disable metric computation, we will do it in the loop here.
snake_case = self.compute_metrics
snake_case = None
snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
snake_case = eval_loop(
__snake_case , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__snake_case , )
finally:
snake_case = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
snake_case = self.post_process_function(__snake_case , __snake_case , output.predictions , '''predict''' )
snake_case = self.compute_metrics(__snake_case )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
snake_case = metrics.pop(__snake_case )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__snake_case )
def a_ ( self , __snake_case="./" ):
snake_case = self.eval_dataset
snake_case = self.get_eval_dataloader(__snake_case )
snake_case = next(iter(__snake_case ) )
# saving device - to make it consistent
snake_case = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
snake_case = tuple(v.to(__snake_case ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
snake_case = True
snake_case = self.model.to(__snake_case )
model.eval()
model.float()
snake_case = model.module if hasattr(__snake_case , '''module''' ) else model
quant_trainer.configure_model(__snake_case , self.quant_trainer_args )
snake_case = os.path.join(__snake_case , '''model.onnx''' )
logger.info(F'''exporting model to {output_model_file}''' )
snake_case = {0: '''batch_size''', 1: '''seq_len'''}
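        # Dynamic axes let the exported ONNX graph accept a variable batch size and sequence length.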
torch.onnx.export(
__snake_case , __snake_case , __snake_case , export_params=__snake_case , opset_version=1_3 , do_constant_folding=__snake_case , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
} , verbose=__snake_case , )
logger.info('''onnx export finished''' )
| 127
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
__A = None
__A = logging.get_logger(__name__)
__A = '▁'
__A = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__A = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
__A = {
'google/pegasus-xsum': 512,
}
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = PegasusTokenizer
lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<pad>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<mask_2>" , __UpperCAmelCase="<mask_1>" , __UpperCAmelCase=None , __UpperCAmelCase=1_03 , **__UpperCAmelCase , ) -> Any:
_lowerCAmelCase =offset
if additional_special_tokens is not None:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError(
f'''additional_special_tokens should be of type {type(__UpperCAmelCase )}, but is'''
f''' {type(__UpperCAmelCase )}''' )
_lowerCAmelCase =(
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(__UpperCAmelCase ) , self.offset - 1 )
]
if len(set(__UpperCAmelCase ) ) != len(__UpperCAmelCase ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
_lowerCAmelCase =additional_special_tokens_extended
else:
_lowerCAmelCase =[mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , pad_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , mask_token_sent=__UpperCAmelCase , offset=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , **__UpperCAmelCase , )
_lowerCAmelCase =vocab_file
_lowerCAmelCase =False if not self.vocab_file else True
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
_lowerCAmelCase =set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"""There should be 3 special tokens: mask_token, pad_token, and eos_token +"""
f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return self._special_token_mask(__UpperCAmelCase )
elif token_ids_a is None:
return self._special_token_mask(__UpperCAmelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ):
copyfile(self.vocab_file , __UpperCAmelCase )
return (out_vocab_file,)
| 341
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = '''cvt'''
def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[64, 1_92, 3_84] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[4.0, 4.0, 4.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.1] , __UpperCAmelCase=[True, True, True] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , __UpperCAmelCase=[3, 3, 3] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-12 , **__UpperCAmelCase , ) -> Optional[Any]:
super().__init__(**__UpperCAmelCase )
_lowerCAmelCase =num_channels
_lowerCAmelCase =patch_sizes
_lowerCAmelCase =patch_stride
_lowerCAmelCase =patch_padding
_lowerCAmelCase =embed_dim
_lowerCAmelCase =num_heads
_lowerCAmelCase =depth
_lowerCAmelCase =mlp_ratio
_lowerCAmelCase =attention_drop_rate
_lowerCAmelCase =drop_rate
_lowerCAmelCase =drop_path_rate
_lowerCAmelCase =qkv_bias
_lowerCAmelCase =cls_token
_lowerCAmelCase =qkv_projection_method
_lowerCAmelCase =kernel_qkv
_lowerCAmelCase =padding_kv
_lowerCAmelCase =stride_kv
_lowerCAmelCase =padding_q
_lowerCAmelCase =stride_q
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
| 341
| 1
|
def _a ( lowerCamelCase, lowerCamelCase ):
if a < 0 or b < 0:
raise ValueError("""the value of both inputs must be positive""" )
lowerCamelCase : Dict = str(bin(lowerCamelCase ) )[2:] # remove the leading "0b"
lowerCamelCase : List[Any] = str(bin(lowerCamelCase ) )[2:] # remove the leading "0b"
lowerCamelCase : str = max(len(lowerCamelCase ), len(lowerCamelCase ) )
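    # Pad the shorter operand with leading zeros, then XOR the two bit strings position by position.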
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(lowerCamelCase ), b_binary.zfill(lowerCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 287
|
_lowerCamelCase ={
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.6_0_2_1_7_6_6_3_4E-1_9,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.355818,
}
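# Convert by first normalizing the value to joules, then dividing by the target unit's factor.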
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
lowerCamelCase : Dict = (
F'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'''
F'''Valid values are: {", ".join(lowerCamelCase )}'''
)
raise ValueError(lowerCamelCase )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 287
| 1
|
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
__snake_case :Dict = logging.getLogger(__name__)
@dataclass(frozen=__UpperCAmelCase )
class _A :
UpperCamelCase__ : Optional[int] = 42
UpperCamelCase__ : List[str] = 42
UpperCamelCase__ : Tuple = None
UpperCamelCase__ : Union[str, Any] = None
UpperCamelCase__ : Tuple = None
@dataclass(frozen=__UpperCAmelCase )
class _A :
UpperCamelCase__ : Optional[int] = 42
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : Optional[Any] = None
UpperCamelCase__ : Dict = None
UpperCamelCase__ : Union[str, Any] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : str = 42
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : PreTrainedTokenizer , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : bool = False , ):
'''simple docstring'''
__a = hans_processors[task]()
__a = os.path.join(
__A , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(__A) , __A , ) , )
__a = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__a , __a = label_list[2], label_list[1]
__a = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__a = cached_features_file + '''.lock'''
with FileLock(__A):
if os.path.exists(__A) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}')
__a = torch.load(__A)
else:
logger.info(F'Creating features from dataset file at {data_dir}')
__a = (
processor.get_dev_examples(__A) if evaluate else processor.get_train_examples(__A)
)
logger.info('''Training examples: %s''' , len(__A))
__a = hans_convert_examples_to_features(__A , __A , __A , __A)
logger.info('''Saving features into cached file %s''' , __A)
torch.save(self.features , __A)
def __len__( self : Tuple):
'''simple docstring'''
return len(self.features)
def __getitem__( self : str , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
return self.features[i]
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
return self.label_list
if is_tf_available():
import tensorflow as tf
class _A :
UpperCamelCase__ : Tuple = 42
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : PreTrainedTokenizer , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] = 128 , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : bool = False , ):
'''simple docstring'''
__a = hans_processors[task]()
__a = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__a , __a = label_list[2], label_list[1]
__a = label_list
__a = processor.get_dev_examples(__A) if evaluate else processor.get_train_examples(__A)
__a = hans_convert_examples_to_features(__A , __A , __A , __A)
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features) , desc='''convert examples to features'''):
if ex_index % 10_000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(__A)))
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
__a = tf.data.Dataset.from_generator(
__A , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([]),
'''input_ids''': tf.TensorShape([None, None]),
'''attention_mask''': tf.TensorShape([None, None]),
'''token_type_ids''': tf.TensorShape([None, None]),
},
tf.TensorShape([]),
) , )
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
return self.dataset
def __len__( self : Dict):
'''simple docstring'''
return len(self.features)
def __getitem__( self : List[str] , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
return self.features[i]
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
return self.label_list
class _A ( __UpperCAmelCase ):
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(__A , '''heuristics_train_set.txt''')) , '''train''')
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(__A , '''heuristics_evaluation_set.txt''')) , '''dev''')
def _lowerCamelCase ( self : Any):
'''simple docstring'''
return ["contradiction", "entailment", "neutral"]
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
__a = []
for i, line in enumerate(__A):
if i == 0:
continue
__a = '''%s-%s''' % (set_type, line[0])
__a = line[5]
__a = line[6]
__a = line[7][2:] if line[7].startswith('''ex''') else line[7]
__a = line[0]
examples.append(InputExample(guid=__A , text_a=__A , text_b=__A , label=__A , pairID=__A))
return examples
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
__a = {label: i for i, label in enumerate(_lowercase )}
__a = []
for ex_index, example in tqdm.tqdm(enumerate(_lowercase ) , desc='''convert examples to features''' ):
if ex_index % 10000 == 0:
logger.info('''Writing example %d''' % (ex_index) )
__a = tokenizer(
example.text_a , example.text_b , add_special_tokens=_lowercase , max_length=_lowercase , padding='''max_length''' , truncation=_lowercase , return_overflowing_tokens=_lowercase , )
__a = label_map[example.label] if example.label in label_map else 0
__a = int(example.pairID )
features.append(InputFeatures(**_lowercase , label=_lowercase , pairID=_lowercase ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(f'guid: {example}' )
logger.info(f'features: {features[i]}' )
return features
__snake_case :List[Any] = {
'''hans''': 3,
}
__snake_case :List[Any] = {
'''hans''': HansProcessor,
}
| 359
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case :Tuple = {
'''configuration_luke''': ['''LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LukeConfig'''],
'''tokenization_luke''': ['''LukeTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Optional[int] = [
'''LUKE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LukeForEntityClassification''',
'''LukeForEntityPairClassification''',
'''LukeForEntitySpanClassification''',
'''LukeForMultipleChoice''',
'''LukeForQuestionAnswering''',
'''LukeForSequenceClassification''',
'''LukeForTokenClassification''',
'''LukeForMaskedLM''',
'''LukeModel''',
'''LukePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
__snake_case :Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 131
| 0
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
"""simple docstring"""
lowerCamelCase_ =AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowerCamelCase_ =load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__snake_case : List[Any] ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase_ =tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__snake_case , max_length=__snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCamelCase_ =datasets.map(
__snake_case , batched=__snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase_ =tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__snake_case : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCamelCase_ =128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCamelCase_ =16
elif accelerator.mixed_precision != "no":
lowerCamelCase_ =8
else:
lowerCamelCase_ =None
return tokenizer.pad(
__snake_case , padding='''longest''' , max_length=__snake_case , pad_to_multiple_of=__snake_case , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowerCamelCase_ =DataLoader(
tokenized_datasets['''train'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
lowerCamelCase_ =DataLoader(
tokenized_datasets['''validation'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
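
# How this example is typically launched (a hedged sketch; the script filename is
# assumed here, and the CLI commands come from the `accelerate` documentation):
#
#   accelerate config                                   # one-time interactive setup
#   accelerate launch tracking.py --with_tracking       # logs to any tracker found in the env
#
# With `--with_tracking`, the per-epoch accuracy/F1 and the mean train loss are sent
# to every tracker `Accelerator` auto-detected (e.g. TensorBoard, Weights & Biases).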
| 75
|
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
| 75
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """
    Downloads and caches the prompt from a repo and returns its contents (if necessary).
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
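
# A quick usage sketch (hedged; the agent name is made up for illustration):
#
#   prompt = download_prompt(None, agent_name="my-agent", mode="run")
#       -> fetches run_prompt_template.txt from huggingface-tools/default-prompts
#   prompt = download_prompt("Write a haiku", agent_name="my-agent")
#       -> returned unchanged, since text containing whitespace is treated as a
#          literal prompt rather than a repo ID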
| 114
|
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched

        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock

        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock

        # check join
        assert _test_patching.join is mock

        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname

        # Even renamed modules or objects must be patched

        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock

        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock

        # check renamed_join
        assert _test_patching.renamed_join is mock

        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everything is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass
def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
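
# The mechanism under test, in one hedged sketch: ``patch_submodule(module, "os.path.join", mock)``
# temporarily replaces ``os.path.join`` *as seen from that module's globals* (including
# renamed imports), without touching the real ``os`` module, e.g.:
#
#   with patch_submodule(some_module, "os.path.join", lambda *p: "/patched"):
#       some_module.do_something_with_paths()   # hypothetical caller of os.path.join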
| 114
| 1
|
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetFromListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        ex_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(ex_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        ex_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(ex_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
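
# What these tests pin down, as a quick interactive sketch:
#
#   >>> from datasets import Dataset
#   >>> Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])[1]
#   {'col_1': None}    # schema comes from the first record; extra keys are dropped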
| 51
|
"""simple docstring"""
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
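
# Illustration of the renaming above (keys assumed from a typical RWKV checkpoint,
# not taken from this file): "blocks.3.att.time_mix_k" becomes
# "rwkv.blocks.3.attention.time_mix_key", while "head.weight" is left untouched.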
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 155
| 0
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce TensorFlow's console noise before any TF import
print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
print("Torch version:", None)
try:
import transformers
print("transformers version:", transformers.__version__)
except ImportError:
print("transformers version:", None)
| 343
|
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
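
# A hedged invocation sketch (the script filename and checkpoint path are
# placeholders, not stated in this file):
#
#   python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#       --xlm_checkpoint_path /path/to/mlm_checkpoint.pth \
#       --pytorch_dump_folder_path ./xlm-converted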
| 343
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
__lowerCAmelCase = None
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = '▁'
__lowerCAmelCase = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
__lowerCAmelCase = {
'google/pegasus-xsum': 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
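
# A short usage sketch (the token IDs shown assume google/pegasus-xsum, where
# pad=0 and eos=1; treat the concrete values as illustrative):
#
#   tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#   tok.build_inputs_with_special_tokens([5, 6, 7])   # -> [5, 6, 7, 1]  (EOS appended)
#   tok.get_special_tokens_mask([5, 6, 7])            # -> [0, 0, 0, 1]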
| 341
|
__lowerCAmelCase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def base64_encode(data: bytes) -> bytes:
    """Encodes ``data`` according to RFC 4648."""
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    """Decodes ``encoded_data`` according to RFC 4648."""
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(decoded_data)
if __name__ == "__main__":
import doctest
doctest.testmod()
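
# A round-trip check of the two functions above (value verified by hand:
# b"Hello" encodes to "SGVsbG8=" with one "=" of padding):
#
#   >>> base64_encode(b"Hello")
#   b'SGVsbG8='
#   >>> base64_decode("SGVsbG8=")
#   b'Hello'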
| 341
| 1
|
import numpy as np
class IndexCalculation:
    """Computes vegetation indices (NDVI, EVI, ...) from spectral band matrices."""

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arv12,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def arv12(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
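
# A minimal usage sketch with made-up reflectance values (any numpy arrays of a
# matching shape work):
#
#   bands = IndexCalculation(
#       red=np.array([50.0]), green=np.array([80.0]),
#       blue=np.array([30.0]), nir=np.array([120.0]),
#   )
#   bands.ndvi()               # array([0.41176471])  ~ (120 - 50) / (120 + 50)
#   bands.calculation("NDVI")  # same result, via the dispatch dict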
| 358
|
"""simple docstring"""
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )
if __name__ == "__main__":
print(solution())
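
# Worked example of the predicate above: 4150 is a fixed point of the
# digit-fifth-power sum, since 4^5 + 1^5 + 5^5 + 0^5 = 1024 + 1 + 3125 + 0 = 4150,
# so it is counted by solution(). Trivial cases like 1 = 1^5 are excluded by
# starting the range at 1000.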
| 149
| 0
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 72
|
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 131
| 0
|
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def lowerCamelCase ( lowerCAmelCase : Dict ):
"""simple docstring"""
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def lowerCamelCase ( lowerCAmelCase : int ):
"""simple docstring"""
__magic_name__ : Any = create_tensor(lowerCAmelCase )
__magic_name__ : Any = gather(lowerCAmelCase )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"
def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))
def test_pad_across_processes(state):
    # The main process holds one extra element, so every other rank gets padded.
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]
def test_reduce_sum(state):
    # This test currently assumes exactly two processes.
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"
def test_reduce_mean(state):
    # This test currently assumes exactly two processes.
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"
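# Worked example for the two reduce tests above with 2 processes: rank 0 holds
# the tensor [1., 2.] and rank 1 holds [3., 4.] (see `create_tensor`), so the
# "sum" reduction yields [4., 6.] and the "mean" reduction yields [2., 3.],
# which is exactly what the truth tensors assert.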
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
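# Note (an assumption about the usual workflow, not part of the original file):
# the collective ops above only exercise multiple ranks when this script is run
# under a distributed launcher, e.g. `accelerate launch test_ops.py
# --num_processes 2`; a plain `python` invocation runs a single process.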
| 363
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImgaImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowerCAmelCase ( self : Dict ) -> Optional[int]:
torch.manual_seed(0 )
__magic_name__ : int = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , attention_head_dim=(2, 4) , use_linear_projection=_A , addition_embed_type='text_time' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
__magic_name__ : str = EulerDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , steps_offset=1 , beta_schedule='scaled_linear' , timestep_spacing='leading' , )
torch.manual_seed(0 )
__magic_name__ : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__magic_name__ : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=32 , )
__magic_name__ : Dict = CLIPTextModel(_A )
__magic_name__ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=_A )
__magic_name__ : Optional[Any] = CLIPTextModelWithProjection(_A )
__magic_name__ : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=_A )
__magic_name__ : List[Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'text_encoder_2': text_encoder_a,
'tokenizer_2': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def __lowerCAmelCase ( self : List[Any] , _A : List[str] , _A : Any=0 ) -> Union[str, Any]:
__magic_name__ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
__magic_name__ : Dict = image / 2 + 0.5
if str(_A ).startswith('mps' ):
__magic_name__ : Any = torch.manual_seed(_A )
else:
__magic_name__ : int = torch.Generator(device=_A ).manual_seed(_A )
__magic_name__ : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 5.0,
'output_type': 'numpy',
'strength': 0.75,
}
return inputs
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
__magic_name__ : str = 'cpu' # ensure determinism for the device-dependent torch.Generator
__magic_name__ : str = self.get_dummy_components()
__magic_name__ : Any = StableDiffusionXLImgaImgPipeline(**_A )
__magic_name__ : List[Any] = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
__magic_name__ : Any = self.get_dummy_inputs(_A )
__magic_name__ : Optional[int] = sd_pipe(**_A ).images
__magic_name__ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__magic_name__ : Any = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def __lowerCAmelCase ( self : List[Any] ) -> int:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __lowerCAmelCase ( self : Any ) -> Union[str, Any]:
pass
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
__magic_name__ : Dict = self.get_dummy_components()
__magic_name__ : Optional[Any] = StableDiffusionXLImgaImgPipeline(**_A )
__magic_name__ : List[Any] = sd_pipe.to(_A )
__magic_name__ : str = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
# forward without prompt embeds
__magic_name__ : Union[str, Any] = self.get_dummy_inputs(_A )
__magic_name__ : Union[str, Any] = 3 * ['this is a negative prompt']
__magic_name__ : List[str] = negative_prompt
__magic_name__ : int = 3 * [inputs['prompt']]
__magic_name__ : Tuple = sd_pipe(**_A )
__magic_name__ : str = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
__magic_name__ : Optional[Any] = self.get_dummy_inputs(_A )
__magic_name__ : Tuple = 3 * ['this is a negative prompt']
__magic_name__ : List[str] = 3 * [inputs.pop('prompt' )]
        __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = sd_pipe.encode_prompt(_A , negative_prompt=_A )
__magic_name__ : Tuple = sd_pipe(
**_A , prompt_embeds=_A , negative_prompt_embeds=_A , pooled_prompt_embeds=_A , negative_pooled_prompt_embeds=_A , )
__magic_name__ : int = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : str , _A : Optional[int] , _A : Optional[Any]="cpu" , _A : List[str]=torch.floataa , _A : Any=0 ) -> str:
__magic_name__ : List[str] = torch.Generator(device=_A ).manual_seed(_A )
__magic_name__ : Optional[Any] = np.random.RandomState(_A ).standard_normal((1, 4, 64, 64) )
__magic_name__ : Union[str, Any] = torch.from_numpy(_A ).to(device=_A , dtype=_A )
__magic_name__ : Optional[int] = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
__magic_name__ : str = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__magic_name__ : Optional[int] = self.get_inputs(_A )
__magic_name__ : Union[str, Any] = pipe(**_A ).images
__magic_name__ : Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__magic_name__ : List[Any] = np.array([0.4_9493, 0.4_7896, 0.4_0798, 0.5_4214, 0.5_3212, 0.4_8202, 0.4_7656, 0.4_6329, 0.4_8506] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 275
| 0
|
# Molecular-chemistry helpers. 0.0821 is the ideal gas constant R in L*atm/(mol*K).
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> int:
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> int:
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> int:
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> int:
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
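    # A minimal usage sketch (illustrative values, relying on the helpers above):
    # 2 mol of solute in 1 L with an n-factor of 2 -> normality 4.
    print(molarity_to_normality(2, 2.0, 1.0))  # 4
    # 2 mol of an ideal gas at 300 K in 10 L -> round(2 * 0.0821 * 300 / 10) = 5 atm.
    print(moles_to_pressure(10.0, 2.0, 300.0))  # 5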
| 114
|
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/maskformer-swin-base-ade": (
"https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    r"""
    Configuration class for [`MaskFormerModel`].
    """

    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]
    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)
    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        """Instantiate a [`MaskFormerConfig`] from a backbone and a decoder configuration."""
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 114
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
MAPPING_ENCODER_48K = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
MAPPING_DECODER_48K = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
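# Quick illustration of the wildcard rules in `should_ignore` (made-up names):
#   should_ignore("encoder.model.0.conv.bias", ["encoder.model.*"])        -> True  (".*" suffix: prefix match)
#   should_ignore("quantizer.vq.layers.0._codebook.embed", ["vq.*.embed"]) -> True  (".*." infix: prefix+suffix match)
#   should_ignore("decoder.model.1.lstm", ["encoder.model.*"])             -> False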
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")
    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """Copy/paste/tweak the original EnCodec weights into the transformers design."""
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")
    model = EncodecModel(config)
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
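# Example invocation of this conversion script (filename and paths are
# illustrative assumptions, not part of the original file):
#
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz_converted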
| 5
|
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock_timeout(tmpdir):
    """Acquiring an already-held lock must time out instead of blocking forever."""
    lock1 = FileLock(str(tmpdir / 'foo.lock'))
    lock2 = FileLock(str(tmpdir / 'foo.lock'))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    """Overlong lock file names are shortened to stay within filesystem limits."""
    filename = 'a' * 1000 + '.lock'
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith('.lock')
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
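# Minimal FileLock usage sketch outside of pytest (path and timeout are
# illustrative):
#
#   lock = FileLock("/tmp/demo.lock")
#   with lock.acquire(timeout=1):
#       ...  # critical section; a concurrent holder makes acquire() raise Timeout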
| 5
| 1
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
_SCREAMING_SNAKE_CASE = """3"""
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 343
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
@property
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
return 32
@property
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
return 32
@property
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
return 8
@property
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowerCamelCase_ )
@property
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
UpperCamelCase = PriorTransformer(**lowerCamelCase_ )
return model
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
UpperCamelCase = ShapERenderer(**lowerCamelCase_ )
return model
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.dummy_prior
UpperCamelCase = self.dummy_text_encoder
UpperCamelCase = self.dummy_tokenizer
UpperCamelCase = self.dummy_renderer
UpperCamelCase = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=lowerCamelCase_ , clip_sample=lowerCamelCase_ , clip_sample_range=1.0 , )
UpperCamelCase = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def lowerCamelCase_ ( self : int , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any]=0 ):
"""simple docstring"""
if str(lowerCamelCase_ ).startswith("""mps""" ):
UpperCamelCase = torch.manual_seed(lowerCamelCase_ )
else:
UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
UpperCamelCase = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = """cpu"""
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = self.pipeline_class(**lowerCamelCase_ )
UpperCamelCase = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
UpperCamelCase = output.images[0]
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
UpperCamelCase = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = torch_device == """cpu"""
UpperCamelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowerCamelCase_ , relax_max_difference=lowerCamelCase_ , )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = self.pipeline_class(**lowerCamelCase_ )
UpperCamelCase = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = 1
UpperCamelCase = 2
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_ )
for key in inputs.keys():
if key in self.batch_params:
UpperCamelCase = batch_size * [inputs[key]]
UpperCamelCase = pipe(**lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
UpperCamelCase = ShapEPipeline.from_pretrained("""openai/shap-e""" )
UpperCamelCase = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCamelCase = pipe(
"""a shark""" , generator=lowerCamelCase_ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
| 343
| 1
|
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, 'all_results.json')
    if os.path.exists(path):
        with open(path, 'r') as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)
    def test_trainer_tpu(self):
        import xla_spawn

        testargs = "\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
        with patch.object(sys, "argv", testargs):
xla_spawn.main()
| 369
|
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}
    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}
    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact and save it as a zip file."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded test-report artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """Count each error, keeping the tests that failed with it."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        model = test.split("/")[2]
    else:
        model = None
    return model
def reduce_by_model(logs, error_filter=None):
    """Group the errors per model and count their occurrences."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    """Render the per-error statistics as a GitHub markdown table."""
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    """Render the per-model statistics as a GitHub markdown table."""
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
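# For reference, `make_github_table` renders GitHub-flavored markdown such as
# the following (row contents illustrative):
#
#   | no. | error | status |
#   |-:|:-|:-|
#   | 42 | OSError: Can't load config for ... | |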
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
    args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
with open(os.path.join(args.output_dir, """job_links.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
    errors = get_all_errors(args.output_dir, job_links=job_links)
    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])
    # print the top 30 most common test errors
    most_common = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, """errors.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)
    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)
    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
| 88
| 0
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_dpmpp_2m")
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 93
|
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING
def _get_default_logging_level():
    """
    If the DATASETS_VERBOSITY env var is set to one of the valid choices, return that as the new default level.
    Otherwise, fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
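# Typical consumer-side usage of this module (a sketch; assumes the module
# lives at its usual `datasets.utils.logging` path):
#
#   from datasets.utils import logging
#   logging.set_verbosity_info()
#   logger = logging.get_logger(__name__)
#   logger.info("dataset loaded")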
| 149
| 0
|
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
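    # Behavioral note: `soup.find(...)` returns None when the page exposes no
    # og:image meta tag, so the subscript above raises TypeError in that case;
    # some sites also require a User-Agent header on the requests.get call.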
| 231
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)


class PegasusTokenizer(PreTrainedTokenizer):
    r"""
    PEGASUS tokenizer, based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }

        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )

        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token
    def convert_tokens_to_string(self, tokens) -> str:
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def num_special_tokens_to_add(self, pair=False):
        """Just EOS."""
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list where entries are [1] if a token is special, else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs by appending eos to a sequence (or sequence pair)."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
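# A hedged, self-contained sketch of the offset bookkeeping above: ids below
# `offset` are reserved for special tokens, and sentencepiece ids are shifted up.
_TOY_OFFSET = 103
_TOY_SPECIALS = {0: "<pad>", 1: "</s>", 2: "<mask_1>", 3: "<mask_2>"}

def _toy_id_to_token(index: int) -> str:  # hypothetical mirror of _convert_id_to_token
    return _TOY_SPECIALS.get(index, f"piece_{index - _TOY_OFFSET}")

assert _toy_id_to_token(1) == "</s>"
assert _toy_id_to_token(110) == "piece_7"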
| 231
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
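# A minimal, generic sketch (an assumed example, not transformers' _LazyModule
# internals) of the lazy-import idea above: resolve a name to its providing
# module only on first access.
import importlib

def _lazy_get(module_name: str, attr: str):
    return getattr(importlib.import_module(module_name), attr)

assert _lazy_get("math", "sqrt")(9) == 3.0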
| 256
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=100, action_weight=5, reward_weight=1, value_weight=1, block_size=249, action_dim=6, observation_dim=17, transition_dim=25, n_layer=4, n_head=4, n_embd=128, embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1, learning_rate=0.0006, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, kaiming_initializer_range=1, use_cache=True, pad_token_id=1, bos_token_id=50256, eos_token_id=50256, **kwargs):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
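# A hedged toy illustration of what `attribute_map` buys you (not the real
# PretrainedConfig machinery): canonical names alias the stored attributes.
class _ToyConfig:
    attribute_map = {"hidden_size": "n_embd"}

    def __init__(self, n_embd=128):
        self.n_embd = n_embd

    def __getattr__(self, name):
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)

assert _ToyConfig().hidden_size == 128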
| 275
| 0
|
"""simple docstring"""
def min_path_sum(grid) -> int:
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]

    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row, row_above) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
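# A quick worked example (illustrative values): the cheapest monotone path
# through this grid is 1 -> 3 -> 1 -> 1 -> 1, for a total cost of 7.
assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7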
| 302
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None, metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label=label_map, label2id={label: i for i, label in enumerate(labels)}, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast, )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)
# Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.test, )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
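# A tiny, self-contained illustration (made-up arrays) of the alignment done in
# `align_predictions` above: positions whose gold label equals the ignore index
# (-100, the default of nn.CrossEntropyLoss) are dropped from both lists.
import numpy as np

_label_ids = np.array([[0, 1, -100]])
_preds = np.array([[0, 1, 1]])
_kept = [(int(p), int(l)) for p, l in zip(_preds[0], _label_ids[0]) if l != -100]
assert _kept == [(0, 0), (1, 1)]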
| 302
| 1
|
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")

MAPPING_QUANTIZER = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
MAPPING_ENCODER = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
MAPPING_ENCODER_48K = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
MAPPING_DECODER = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
MAPPING_DECODER_48K = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the (possibly nested) attribute of `hf_pointer` named by `key`."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}")

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    """Return True if `name` matches any (possibly wildcarded) ignore key."""
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
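# Quick sanity checks (hedged examples) for the wildcard matching above:
assert should_ignore("encoder.model.0.conv.bias", ["encoder.model.0.*"])
assert should_ignore("a.middle.b", ["a.*.b"])
assert not should_ignore("decoder.model.1.lstm", ["encoder.*"])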
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    # note: the original upstream script had an always-true `or "encodec_32khz"` here;
    # membership testing is what was intended.
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 5
|
from math import isqrt


def calculate_prime_numbers(max_number) -> list[int]:
    """Sieve of Eratosthenes: all primes strictly below `max_number`."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number=10**8) -> int:
    """Count semiprimes p * q < max_number with primes p <= q, via a two-pointer scan."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count


if __name__ == "__main__":
    print(f"{solution() = }")
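# A small sanity check (hedged example): the semiprimes below 30 are
# 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26 -- ten of them.
assert solution(30) == 10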
| 5
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
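# Typical CLI invocation (a hedged example) once `accelerate` is installed:
#   accelerate test --config_file path/to/default_config.yaml
# which launches test_utils/scripts/test_script.py via `accelerate-launch`.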
| 26
|
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    """Pipeline for value-guided sampling from a diffusion model trained to predict state trajectories."""

    def __init__(self, value_function: UNet1DModel, unet: UNet1DModel, scheduler: DDPMScheduler, env):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]
    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in
    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)
            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)
            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y
    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
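# A minimal, self-contained sketch (assumed toy model, not the pipeline's API)
# of the guidance step in `run_diffusion`: nudge the sample along the gradient
# of a value estimate so higher-value trajectories become more likely.
if __name__ == "__main__":
    value_net = torch.nn.Linear(4, 1)  # stand-in for the trained value function
    x = torch.randn(8, 4, requires_grad=True)
    y = value_net(x).sum()
    (grad,) = torch.autograd.grad([y], [x])
    x = (x + 0.1 * grad).detach()  # one guided update with scale = 0.1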
| 26
| 1
|
'''simple docstring'''
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , A_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name, data_args.dataset_config_name, split=f"train[:{data_args.validation_split_percentage}%]", )
            datasets["train"] = load_dataset(
                data_args.dataset_name, data_args.dataset_config_name, split=f"train[{data_args.validation_split_percentage}%:]", )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=not data_args.overwrite_cache, )
# Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file)
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False
# Data collator
# This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)
# Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=tokenized_datasets["train"] if training_args.do_train else None, eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, )
# Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
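# The evaluation metric above in isolation (toy number, not a real result):
# perplexity is exp(mean cross-entropy loss in nats), so a loss of 2.0 gives
# a perplexity of about 7.39.
assert 7.38 < math.exp(2.0) < 7.40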
| 41
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__lowerCAmelCase : int = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
__lowerCAmelCase : Any = (
subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode('utf-8').split()
)
__lowerCAmelCase : str = '|'.join(sys.argv[1:])
__lowerCAmelCase : Tuple = re.compile(RF'''^({joined_dirs}).*?\.py$''')
__lowerCAmelCase : Union[str, Any] = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 88
| 0
|
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
def __snake_case ( self : Any ) -> str:
super().setUp()
__snake_case : Optional[int] = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
__snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def __snake_case ( self : Dict , lowerCamelCase : Tuple ) -> List[Any]:
__snake_case : List[str] = "UNwant\u00E9d,running"
__snake_case : Optional[int] = "unwanted, running"
return input_text, output_text
def __snake_case ( self : Tuple ) -> Optional[int]:
__snake_case : List[Any] = self.tokenizer_class(self.vocab_file )
__snake_case : int = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(__SCREAMING_SNAKE_CASE , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [7, 4, 5, 10, 8, 9] )
def __snake_case ( self : Optional[Any] ) -> Dict:
if not self.test_rust_tokenizer:
return
__snake_case : Optional[int] = self.get_tokenizer()
__snake_case : Any = self.get_rust_tokenizer()
__snake_case : Union[str, Any] = "I was born in 92000, and this is falsé."
__snake_case : Optional[int] = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
__snake_case : List[Any] = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case : List[str] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
__snake_case : List[str] = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case : List[Any] = self.get_rust_tokenizer()
__snake_case : Optional[Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE )
__snake_case : List[str] = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
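# To run just this module with pytest (the path below is an assumption based on
# the usual transformers test layout, not taken from this file):
#   python -m pytest tests/models/lxmert/test_tokenization_lxmert.py -q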
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
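# Illustrative use of the builder above; calling it would download the label file
# from the Hub, so the example is shown as a comment only:
#   config = get_maskformer_config("maskformer-swin-tiny-ade")
#   assert config.num_labels == 150  # per the "ade" branch above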
def create_rename_keys(config):
    rename_keys = []
    # stem
    # fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
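# Tiny self-contained check of rename_key: the value moves to the new key.
_demo = {"old.name": 1}
rename_key(_demo, "old.name", "new.name")
assert _demo == {"new.name": 1}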
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
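# Minimal sketch of the fused-qkv split above: a (3*dim, dim) matrix is cut into
# equal query/key/value thirds (toy dim=2 here; purely illustrative, uses the
# torch import already present in this script).
_qkv = torch.arange(12.0).reshape(6, 2)
_q, _k, _v = _qkv[:2, :], _qkv[2:4, :], _qkv[-2:, :]
assert torch.equal(torch.cat([_q, _k, _v]), _qkv)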
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results on an example image
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
_snake_case : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="maskformer-swin-tiny-ade",
type=str,
help=("Name of the MaskFormer model you'd like to convert",),
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_snake_case : List[str] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
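# Example invocation (script filename and paths below are placeholders):
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/model.pkl \
#       --pytorch_dump_folder_path ./converted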