code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def __UpperCamelCase ( SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
if "model" in orig_key:
__snake_case = orig_key.replace("model." , "" )
if "norm1" in orig_key:
__snake_case = orig_key.replace("norm1" , "attention.output.LayerNorm" )
if "norm2" in orig_key:
__snake_case = orig_key.replace("norm2" , "output.LayerNorm" )
if "norm" in orig_key:
__snake_case = orig_key.replace("norm" , "LayerNorm" )
if "transformer" in orig_key:
__snake_case = orig_key.split("." )[0].split("_" )[-1]
__snake_case = orig_key.replace(F'''transformer_{layer_num}''' , F'''encoder.layer.{layer_num}''' )
if "mha.attn" in orig_key:
__snake_case = orig_key.replace("mha.attn" , "attention.self" )
if "mha" in orig_key:
__snake_case = orig_key.replace("mha" , "attention" )
if "W_q" in orig_key:
__snake_case = orig_key.replace("W_q" , "self.query" )
if "W_k" in orig_key:
__snake_case = orig_key.replace("W_k" , "self.key" )
if "W_v" in orig_key:
__snake_case = orig_key.replace("W_v" , "self.value" )
if "ff1" in orig_key:
__snake_case = orig_key.replace("ff1" , "intermediate.dense" )
if "ff2" in orig_key:
__snake_case = orig_key.replace("ff2" , "output.dense" )
if "ff" in orig_key:
__snake_case = orig_key.replace("ff" , "output.dense" )
if "mlm_class" in orig_key:
__snake_case = orig_key.replace("mlm.mlm_class" , "cls.predictions.decoder" )
if "mlm" in orig_key:
__snake_case = orig_key.replace("mlm" , "cls.predictions.transform" )
if "cls" not in orig_key:
__snake_case = "yoso." + orig_key
return orig_key
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__snake_case = orig_state_dict.pop(__snake_case )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
__snake_case = val
__snake_case = orig_state_dict["cls.predictions.decoder.bias"]
__snake_case = torch.arange(__snake_case ).expand((1, -1) ) + 2
return orig_state_dict
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
__snake_case = torch.load(__snake_case , map_location="cpu" )["model_state_dict"]
__snake_case = YosoConfig.from_json_file(__snake_case )
__snake_case = YosoForMaskedLM(__snake_case )
__snake_case = convert_checkpoint_helper(config.max_position_embeddings , __snake_case )
print(model.load_state_dict(__snake_case ) )
model.eval()
model.save_pretrained(__snake_case )
print(F'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""", default=None, type=str, required=True, help="""Path to YOSO pytorch checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for YOSO model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 163
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class _snake_case ( _A ):
_A = 'Speech2TextFeatureExtractor'
_A = 'Speech2TextTokenizer'
def __init__( self ,UpperCamelCase ,UpperCamelCase ) -> int:
super().__init__(UpperCamelCase ,UpperCamelCase )
snake_case__ :int = self.feature_extractor
snake_case__ :Any = False
def __call__( self ,*UpperCamelCase ,**UpperCamelCase ) -> Optional[Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*UpperCamelCase ,**UpperCamelCase )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
snake_case__ :int = kwargs.pop("raw_speech" )
else:
snake_case__ :Tuple = kwargs.pop("audio" ,UpperCamelCase )
snake_case__ :List[Any] = kwargs.pop("sampling_rate" ,UpperCamelCase )
snake_case__ :Tuple = kwargs.pop("text" ,UpperCamelCase )
if len(UpperCamelCase ) > 0:
snake_case__ :str = args[0]
snake_case__ :Tuple = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
snake_case__ :List[Any] = self.feature_extractor(UpperCamelCase ,*UpperCamelCase ,sampling_rate=UpperCamelCase ,**UpperCamelCase )
if text is not None:
snake_case__ :List[Any] = self.tokenizer(UpperCamelCase ,**UpperCamelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
snake_case__ :Optional[Any] = encodings["input_ids"]
return inputs
def lowerCAmelCase_ ( self ,*UpperCamelCase ,**UpperCamelCase ) -> Any:
return self.tokenizer.batch_decode(*UpperCamelCase ,**UpperCamelCase )
def lowerCAmelCase_ ( self ,*UpperCamelCase ,**UpperCamelCase ) -> Optional[Any]:
return self.tokenizer.decode(*UpperCamelCase ,**UpperCamelCase )
@contextmanager
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
snake_case__ :Optional[int] = True
snake_case__ :str = self.tokenizer
yield
snake_case__ :Optional[Any] = self.feature_extractor
snake_case__ :Tuple = False
| 241
| 0
|
from __future__ import annotations
__A =[
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def a ( _UpperCAmelCase : list[list[int]] , _UpperCAmelCase : list[int] , _UpperCAmelCase : list[int] , _UpperCAmelCase : int , _UpperCAmelCase : list[list[int]] , ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_UpperCAmelCase ) )
] # the reference grid
__UpperCAmelCase : Optional[int] = 1
__UpperCAmelCase : str = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_UpperCAmelCase ) )
] # the action grid
__UpperCAmelCase : int = init[0]
__UpperCAmelCase : List[Any] = init[1]
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : Optional[int] = g + heuristic[x][y] # cost from starting cell to destination cell
__UpperCAmelCase : Union[str, Any] = [[f, g, x, y]]
__UpperCAmelCase : Optional[Any] = False # flag that is set when search is complete
__UpperCAmelCase : List[Any] = False # flag set if we can't find expand
while not found and not resign:
if len(_UpperCAmelCase ) == 0:
raise ValueError('''Algorithm is unable to find solution''' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
__UpperCAmelCase : Union[str, Any] = cell.pop()
__UpperCAmelCase : int = next_cell[2]
__UpperCAmelCase : Optional[Any] = next_cell[3]
__UpperCAmelCase : Dict = next_cell[1]
if x == goal[0] and y == goal[1]:
__UpperCAmelCase : Tuple = True
else:
for i in range(len(_UpperCAmelCase ) ): # to try out different valid actions
__UpperCAmelCase : List[Any] = x + DIRECTIONS[i][0]
__UpperCAmelCase : Any = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(_UpperCAmelCase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__UpperCAmelCase : Tuple = g + cost
__UpperCAmelCase : Optional[Any] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__UpperCAmelCase : Union[str, Any] = 1
__UpperCAmelCase : Optional[Any] = i
__UpperCAmelCase : Dict = []
__UpperCAmelCase : Union[str, Any] = goal[0]
__UpperCAmelCase : Tuple = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__UpperCAmelCase : Tuple = x - DIRECTIONS[action[x][y]][0]
__UpperCAmelCase : Any = y - DIRECTIONS[action[x][y]][1]
__UpperCAmelCase : Dict = xa
__UpperCAmelCase : Dict = ya
invpath.append([x, y] )
__UpperCAmelCase : Optional[int] = []
for i in range(len(_UpperCAmelCase ) ):
path.append(invpath[len(_UpperCAmelCase ) - 1 - i] )
return path, action
if __name__ == "__main__":
__A =[
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__A =[0, 0]
# all coordinates are given in format [y,x]
__A =[len(grid) - 1, len(grid[0]) - 1]
__A =1
# the cost map which pushes the path closer to the goal
__A =[[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__A =abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__A =9_9
__A , __A =search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 241
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
__A ="facebook/wmt19-en-de"
__A =FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
__A =FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
__A =FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')
# Test
__A =tokenizer(["Making tiny model"], return_tensors="pt")
__A =tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
__A ="tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 241
| 1
|
import string
from math import logaa
def A__ ( snake_case_ : str , snake_case_ : str ):
SCREAMING_SNAKE_CASE__: Optional[int]= document.translate(
str.maketrans('''''' , '''''' , string.punctuation ) ).replace('''\n''' , '''''' )
SCREAMING_SNAKE_CASE__: Tuple= document_without_punctuation.split(''' ''' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def A__ ( snake_case_ : str , snake_case_ : str ):
SCREAMING_SNAKE_CASE__: str= corpus.lower().translate(
str.maketrans('''''' , '''''' , string.punctuation ) ) # strip all punctuation and replace it with ''
SCREAMING_SNAKE_CASE__: Optional[Any]= corpus_without_punctuation.split('''\n''' )
SCREAMING_SNAKE_CASE__: List[Any]= term.lower()
return (len([doc for doc in docs if term in doc] ), len(snake_case_ ))
def A__ ( snake_case_ : int , snake_case_ : int , snake_case_ : Tuple=False ):
if smoothing:
if n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('''df must be > 0''' )
elif n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(logaa(n / df ) , 3 )
def A__ ( snake_case_ : int , snake_case_ : int ):
return round(tf * idf , 3 )
| 64
|
from math import factorial
def A__ ( snake_case_ : int , snake_case_ : int ):
# If either of the conditions are true, the function is being asked
# to calculate a factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError('''Please enter positive integers for n and k where n >= k''' )
return factorial(snake_case_ ) // (factorial(snake_case_ ) * factorial(n - k ))
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
f'''fifty-two card deck is: {combinations(5_2, 5)}\n''',
)
print(
'If a class of 40 students must be arranged into groups of',
f'''4 for group projects, there are {combinations(4_0, 4)} ways''',
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
f'''are {combinations(1_0, 3)} ways that first, second and''',
'third place can be awarded.',
)
| 64
| 1
|
"""simple docstring"""
def A__ ( _UpperCAmelCase : int = 50 ) -> int:
'''simple docstring'''
snake_case__ : Tuple = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f"{solution() = }")
| 150
|
"""simple docstring"""
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def A__ ( _UpperCAmelCase : str ) -> str:
'''simple docstring'''
if not is_accelerate_available():
return method
snake_case__ : Tuple = version.parse(accelerate.__version__ ).base_version
if version.parse(_UpperCAmelCase ) < version.parse("0.17.0" ):
return method
def wrapper(self : List[str] , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Dict ):
if hasattr(self , "_hf_hook" ) and hasattr(self._hf_hook , "pre_forward" ):
self._hf_hook.pre_forward(self )
return method(self , *_UpperCAmelCase , **_UpperCAmelCase )
return wrapper
| 150
| 1
|
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
_lowercase = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
_lowercase = [ord(letter) for letter in string.ascii_lowercase]
_lowercase = {ord(char) for char in VALID_CHARS}
_lowercase = ["the", "be", "to", "of", "and", "in", "that", "have"]
def lowerCAmelCase__ ( UpperCamelCase_ : list[int] , UpperCamelCase_ : tuple[int, ...] )-> str | None:
A__ = ""
A__ = 42
A__ = 42
A__ = 42
for keychar, cipherchar in zip(cycle(UpperCamelCase_ ) , UpperCamelCase_ ):
A__ = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(UpperCamelCase_ )
return decoded
def lowerCAmelCase__ ( UpperCamelCase_ : list[int] )-> list[str]:
A__ = []
for key in product(UpperCamelCase_ , repeat=3 ):
A__ = try_key(UpperCamelCase_ , UpperCamelCase_ )
if encoded is not None:
possibles.append(UpperCamelCase_ )
return possibles
def lowerCAmelCase__ ( UpperCamelCase_ : list[str] , UpperCamelCase_ : str )-> list[str]:
return [possible for possible in possibles if common_word in possible.lower()]
def lowerCAmelCase__ ( UpperCamelCase_ : str = "p059_cipher.txt" )-> int:
A__ = 42
A__ = 42
A__ = 42
A__ = 42
A__ = Path(UpperCamelCase_ ).parent.joinpath(UpperCamelCase_ ).read_text(encoding='''utf-8''' )
A__ = [int(UpperCamelCase_ ) for number in data.strip().split(''',''' )]
A__ = filter_valid_chars(UpperCamelCase_ )
for common_word in COMMON_WORDS:
A__ = filter_common_word(UpperCamelCase_ , UpperCamelCase_ )
if len(UpperCamelCase_ ) == 1:
break
A__ = possibles[0]
return sum(ord(UpperCamelCase_ ) for char in decoded_text )
if __name__ == "__main__":
print(F"{solution() = }")
| 632
|
def lowerCAmelCase__ ( UpperCamelCase_ : int )-> int:
A__ = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def lowerCAmelCase__ ( UpperCamelCase_ : int )-> int:
A__ = 0
while number > 0:
A__ = number % 1_0
sum_of_digits += last_digit
A__ = number // 1_0 # Removing the last_digit from the given number
return sum_of_digits
def lowerCAmelCase__ ( UpperCamelCase_ : int = 1_0_0 )-> int:
A__ = factorial(UpperCamelCase_ )
A__ = split_and_add(UpperCamelCase_ )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 632
| 1
|
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = FunnelConfig.from_json_file(a_ )
print(f'Building PyTorch model from configuration: {config}' )
lowercase = FunnelBaseModel(a_ ) if base_model else FunnelModel(a_ )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(a_ , a_ , a_ )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , a_ )
if __name__ == "__main__":
lowercase__ :Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
lowercase__ :Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 718
|
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
lowercase__ :Union[str, Any] = logging.get_logger(__name__)
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : Union[str, Any] =CLIPConfig
lowercase_ : str =['''CLIPEncoderLayer''']
def __init__( self ,A__):
super().__init__(A__)
lowercase = CLIPVisionModelWithProjection(config.vision_config)
lowercase = nn.Linear(config.vision_config.projection_dim ,1)
lowercase = nn.Linear(config.vision_config.projection_dim ,1)
@torch.no_grad()
def A__ ( self ,A__ ,A__ ,A__=0.5 ,A__=0.5):
lowercase = self.vision_model(A__)[0]
lowercase = self.p_head(A__)
lowercase = nsfw_detected.flatten()
lowercase = nsfw_detected > p_threshold
lowercase = nsfw_detected.tolist()
if any(A__):
logger.warning(
'''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''')
for idx, nsfw_detected_ in enumerate(A__):
if nsfw_detected_:
lowercase = np.zeros(images[idx].shape)
lowercase = self.w_head(A__)
lowercase = watermark_detected.flatten()
lowercase = watermark_detected > w_threshold
lowercase = watermark_detected.tolist()
if any(A__):
logger.warning(
'''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''')
for idx, watermark_detected_ in enumerate(A__):
if watermark_detected_:
lowercase = np.zeros(images[idx].shape)
return images, nsfw_detected, watermark_detected
| 633
| 0
|
import datasets
from .evaluate import evaluate
UpperCamelCase__ : Union[str, Any] = '''\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
'''
UpperCamelCase__ : int = '''
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''
UpperCamelCase__ : Optional[Any] = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric("cuad")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
def snake_case ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': {
'id': datasets.Value('string' ),
'prediction_text': datasets.features.Sequence(datasets.Value('string' ) ),
},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) ,codebase_urls=['https://www.atticusprojectai.org/cuad'] ,reference_urls=['https://www.atticusprojectai.org/cuad'] ,)
def snake_case ( self ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
SCREAMING_SNAKE_CASE_ : int = [
{
'paragraphs': [
{
'qas': [
{
'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
'id': ref['id'],
}
for ref in references
]
}
]
}
]
SCREAMING_SNAKE_CASE_ : Optional[int] = evaluate(dataset=snake_case__ ,predictions=snake_case__ )
return score
| 105
|
'''simple docstring'''
def a_ ( UpperCamelCase_ ):
if length <= 0 or not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(UpperCamelCase_ )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 452
| 0
|
'''simple docstring'''
from __future__ import annotations
_a : Tuple = 1.60_21e-19 # units = C
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> tuple[str, float]:
"""simple docstring"""
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif conductivity < 0:
raise ValueError("Conductivity cannot be negative" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative" )
elif mobility < 0:
raise ValueError("mobility cannot be negative" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721
|
'''simple docstring'''
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
"""simple docstring"""
return number | (1 << position)
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
"""simple docstring"""
return number & ~(1 << position)
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
"""simple docstring"""
return number ^ (1 << position)
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> bool:
"""simple docstring"""
return ((number >> position) & 1) == 1
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
"""simple docstring"""
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10
| 0
|
'''simple docstring'''
class a__:
def __init__( self ) -> Union[str, Any]:
snake_case__ =''
snake_case__ =''
snake_case__ =[]
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> int:
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
snake_case__ =self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
snake_case__ =self.__min_dist_top_down_dp(_UpperCAmelCase , n - 1 )
snake_case__ =self.__min_dist_top_down_dp(m - 1 , _UpperCAmelCase )
snake_case__ =self.__min_dist_top_down_dp(m - 1 , n - 1 )
snake_case__ =1 + min(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return self.dp[m][n]
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> int:
snake_case__ =worda
snake_case__ =worda
snake_case__ =[[-1 for _ in range(len(_UpperCAmelCase ) )] for _ in range(len(_UpperCAmelCase ) )]
return self.__min_dist_top_down_dp(len(_UpperCAmelCase ) - 1 , len(_UpperCAmelCase ) - 1 )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> int:
snake_case__ =worda
snake_case__ =worda
snake_case__ =len(_UpperCAmelCase )
snake_case__ =len(_UpperCAmelCase )
snake_case__ =[[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
snake_case__ =j
elif j == 0: # second string is empty
snake_case__ =i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
snake_case__ =self.dp[i - 1][j - 1]
else:
snake_case__ =self.dp[i][j - 1]
snake_case__ =self.dp[i - 1][j]
snake_case__ =self.dp[i - 1][j - 1]
snake_case__ =1 + min(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return self.dp[m][n]
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Any = EditDistance()
print('''****************** Testing Edit Distance DP Algorithm ******************''')
print()
SCREAMING_SNAKE_CASE__ : int = input('''Enter the first string: ''').strip()
SCREAMING_SNAKE_CASE__ : Any = input('''Enter the second string: ''').strip()
print()
print(f"""The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}""")
print(f"""The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}""")
print()
print('''*************** End of Testing Edit Distance DP Algorithm ***************''')
| 538
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : List[Any] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''',
},
}
SCREAMING_SNAKE_CASE__ : List[Any] = {
'''camembert-base''': 5_1_2,
}
SCREAMING_SNAKE_CASE__ : Any = '''▁'''
class a__( snake_case__ ):
a_ : Tuple = VOCAB_FILES_NAMES
a_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
a_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : str = ['''input_ids''', '''attention_mask''']
a_ : Union[str, Any] = CamembertTokenizer
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] , **_UpperCAmelCase , ) -> str:
# Mask token behave like a normal word, i.e. include the space before it
snake_case__ =AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
snake_case__ =vocab_file
snake_case__ =False if not self.vocab_file else True
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ =[self.cls_token_id]
snake_case__ =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase = None ) -> List[int]:
snake_case__ =[self.sep_token_id]
snake_case__ =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case__ =os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ):
copyfile(self.vocab_file , _UpperCAmelCase )
return (out_vocab_file,)
| 538
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : Union[str, Any] = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 126
|
'''simple docstring'''
from __future__ import annotations
__A : Optional[int] = list[list[int]]
# assigning initial values to the grid
__A : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__A : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def lowerCAmelCase_ ( a : Matrix , a : int , a : int , a : int ):
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def lowerCAmelCase_ ( a : Matrix ):
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def lowerCAmelCase_ ( a : Matrix ):
if location := find_empty_location(a ):
a__ , a__ = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(a , a , a , a ):
a__ = digit
if sudoku(a ) is not None:
return grid
a__ = 0
return None
def lowerCAmelCase_ ( a : Matrix ):
for row in grid:
for cell in row:
print(a , end=' ' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
__A : Optional[int] = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
| 126
| 1
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __lowerCamelCase :
a__: List[str]
a__: Optional[str] = None
# Automatically constructed
a__: ClassVar[str] = "dict"
a__: ClassVar[Any] = None
a__: str = field(default='Translation' , init=lowerCAmelCase , repr=lowerCAmelCase )
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def UpperCAmelCase__ ( self ):
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class __lowerCamelCase :
a__: Optional[List] = None
a__: Optional[int] = None
a__: Optional[str] = None
# Automatically constructed
a__: ClassVar[str] = "dict"
a__: ClassVar[Any] = None
a__: str = field(default='TranslationVariableLanguages' , init=lowerCAmelCase , repr=lowerCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = sorted(set(self.languages ) ) if self.languages else None
lowerCamelCase_ = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = set(self.languages )
if self.languages and set(UpperCAmelCase ) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(UpperCAmelCase ) - lang_set ) )}) are not in valid set ({', '.join(UpperCAmelCase )})." )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
lowerCamelCase_ = []
for lang, text in translation_dict.items():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
lowerCamelCase_ , lowerCamelCase_ = zip(*sorted(UpperCAmelCase ) )
return {"language": languages, "translation": translations}
def UpperCAmelCase__ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
| 29
|
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class _UpperCAmelCase ( a_ ):
"""simple docstring"""
def a__ ( self ) -> str:
_lowerCamelCase : Any = tempfile.mkdtemp()
_lowerCamelCase : List[Any] = 5
# Realm tok
_lowerCamelCase : List[str] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_lowerCamelCase : int = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
os.makedirs(_lowercase , exist_ok=_lowercase )
_lowerCamelCase : Any = os.path.join(_lowercase , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_lowerCamelCase : Optional[int] = os.path.join(self.tmpdirname , '''realm_block_records''' )
os.makedirs(_lowercase , exist_ok=_lowercase )
def a__ ( self ) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )
def a__ ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def a__ ( self ) -> str:
_lowerCamelCase : Optional[Any] = RealmConfig(num_block_records=self.num_block_records )
return config
def a__ ( self ) -> Optional[int]:
_lowerCamelCase : Tuple = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
} )
return dataset
def a__ ( self ) -> int:
_lowerCamelCase : Dict = np.array(
[
b'''This is the first record''',
b'''This is the second record''',
b'''This is the third record''',
b'''This is the fourth record''',
b'''This is the fifth record''',
b'''This is a longer longer longer record''',
] , dtype=_lowercase , )
return block_records
def a__ ( self ) -> Dict:
_lowerCamelCase : Union[str, Any] = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def a__ ( self ) -> int:
_lowerCamelCase : int = self.get_config()
_lowerCamelCase : Optional[Any] = self.get_dummy_retriever()
_lowerCamelCase : Union[str, Any] = retriever.tokenizer
_lowerCamelCase : Any = np.array([0, 3] , dtype='''long''' )
_lowerCamelCase : Any = tokenizer(['''Test question'''] ).input_ids
_lowerCamelCase : str = tokenizer(
['''the fourth'''] , add_special_tokens=_lowercase , return_token_type_ids=_lowercase , return_attention_mask=_lowercase , ).input_ids
_lowerCamelCase : List[str] = config.reader_seq_len
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = retriever(
_lowercase , _lowercase , answer_ids=_lowercase , max_length=_lowercase , return_tensors='''np''' )
self.assertEqual(len(_lowercase ) , 2 )
self.assertEqual(len(_lowercase ) , 2 )
self.assertEqual(len(_lowercase ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
def a__ ( self ) -> int:
_lowerCamelCase : str = self.get_config()
_lowerCamelCase : Union[str, Any] = self.get_dummy_retriever()
_lowerCamelCase : str = retriever.tokenizer
_lowerCamelCase : List[str] = np.array([0, 3, 5] , dtype='''long''' )
_lowerCamelCase : List[Any] = tokenizer(['''Test question'''] ).input_ids
_lowerCamelCase : List[str] = tokenizer(
['''the fourth''', '''longer longer'''] , add_special_tokens=_lowercase , return_token_type_ids=_lowercase , return_attention_mask=_lowercase , ).input_ids
_lowerCamelCase : Optional[Any] = config.reader_seq_len
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[Any] = retriever(
_lowercase , _lowercase , answer_ids=_lowercase , max_length=_lowercase , return_tensors='''np''' )
self.assertEqual([False, True, True] , _lowercase )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , _lowercase )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , _lowercase )
def a__ ( self ) -> int:
_lowerCamelCase : Optional[int] = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
# Test local path
_lowerCamelCase : Dict = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
# Test mocked remote path
with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
_lowerCamelCase : Optional[Any] = os.path.join(
os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
_lowerCamelCase : List[Any] = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
| 434
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( __snake_case , unittest.TestCase ):
_lowerCAmelCase = KandinskyImgaImgPipeline
_lowerCAmelCase = ["prompt", "image_embeds", "negative_image_embeds", "image"]
_lowerCAmelCase = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
_lowerCAmelCase = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_lowerCAmelCase = False
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return 32
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return 32
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return self.time_input_dim
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return 100
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : List[Any] = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
torch.manual_seed(0 )
__a : str = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
__a : int = MultilingualCLIP(_lowercase )
__a : Union[str, Any] = text_encoder.eval()
return text_encoder
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
torch.manual_seed(0 )
__a : str = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__a : Dict = UNetaDConditionModel(**_lowercase )
return model
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
torch.manual_seed(0 )
__a : Union[str, Any] = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Tuple = self.dummy_text_encoder
__a : str = self.dummy_tokenizer
__a : Union[str, Any] = self.dummy_unet
__a : Union[str, Any] = self.dummy_movq
__a : int = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
__a : Tuple = DDIMScheduler(**_lowercase )
__a : Optional[int] = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowerCAmelCase__(self , _lowercase , _lowercase=0 ):
'''simple docstring'''
__a : Tuple = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowercase ) ).to(_lowercase )
__a : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowercase )
# create init_image
__a : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowercase ) ).to(_lowercase )
__a : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__a : Tuple = Image.fromarray(np.uinta(_lowercase ) ).convert("""RGB""" ).resize((256, 256) )
if str(_lowercase ).startswith("""mps""" ):
__a : Union[str, Any] = torch.manual_seed(_lowercase )
else:
__a : str = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__a : str = {
"""prompt""": """horse""",
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Dict = """cpu"""
__a : Any = self.get_dummy_components()
__a : Optional[Any] = self.pipeline_class(**_lowercase )
__a : Any = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__a : Tuple = pipe(**self.get_dummy_inputs(_lowercase ) )
__a : Optional[Any] = output.images
__a : Union[str, Any] = pipe(
**self.get_dummy_inputs(_lowercase ) , return_dict=_lowercase , )[0]
__a : List[str] = image[0, -3:, -3:, -1]
__a : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__a : List[str] = np.array(
[0.6147_4943, 0.607_3539, 0.4330_8544, 0.592_8269, 0.4749_3595, 0.4675_5973, 0.461_3838, 0.4536_8797, 0.5011_9233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def lowerCAmelCase__(self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_img2img_frog.npy""" )
__a : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
__a : List[Any] = """A red cartoon frog, 4k"""
__a : Optional[Any] = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(_lowercase )
__a : Any = KandinskyImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa )
__a : str = pipeline.to(_lowercase )
pipeline.set_progress_bar_config(disable=_lowercase )
__a : Optional[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
__a , __a : str = pipe_prior(
_lowercase , generator=_lowercase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
__a : Union[str, Any] = pipeline(
_lowercase , image=_lowercase , image_embeds=_lowercase , negative_image_embeds=_lowercase , generator=_lowercase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
__a : List[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowercase , _lowercase )
| 63
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
_lowerCAmelCase = "unispeech"
def __init__(self , _lowercase=32 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3072 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.02 , _lowercase=1e-5 , _lowercase="group" , _lowercase="gelu" , _lowercase=(512, 512, 512, 512, 512, 512, 512) , _lowercase=(5, 2, 2, 2, 2, 2, 2) , _lowercase=(10, 3, 3, 3, 3, 2, 2) , _lowercase=False , _lowercase=128 , _lowercase=16 , _lowercase=False , _lowercase=True , _lowercase=0.05 , _lowercase=10 , _lowercase=2 , _lowercase=0.0 , _lowercase=10 , _lowercase=0 , _lowercase=320 , _lowercase=2 , _lowercase=0.1 , _lowercase=100 , _lowercase=256 , _lowercase=256 , _lowercase=0.1 , _lowercase="mean" , _lowercase=False , _lowercase=False , _lowercase=256 , _lowercase=80 , _lowercase=0 , _lowercase=1 , _lowercase=2 , _lowercase=0.5 , **_lowercase , ):
'''simple docstring'''
super().__init__(**_lowercase , pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase )
__a : Union[str, Any] = hidden_size
__a : Any = feat_extract_norm
__a : Union[str, Any] = feat_extract_activation
__a : Tuple = list(_lowercase )
__a : Dict = list(_lowercase )
__a : List[Any] = list(_lowercase )
__a : List[Any] = conv_bias
__a : Optional[Any] = num_conv_pos_embeddings
__a : Union[str, Any] = num_conv_pos_embedding_groups
__a : Dict = len(self.conv_dim )
__a : Dict = num_hidden_layers
__a : Union[str, Any] = intermediate_size
__a : List[str] = hidden_act
__a : int = num_attention_heads
__a : int = hidden_dropout
__a : Any = attention_dropout
__a : List[Any] = activation_dropout
__a : List[Any] = feat_proj_dropout
__a : Union[str, Any] = final_dropout
__a : str = layerdrop
__a : Dict = layer_norm_eps
__a : Dict = initializer_range
__a : Union[str, Any] = num_ctc_classes
__a : List[Any] = vocab_size
__a : Any = do_stable_layer_norm
__a : List[str] = use_weighted_layer_sum
__a : List[str] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__a : Dict = apply_spec_augment
__a : Union[str, Any] = mask_time_prob
__a : List[str] = mask_time_length
__a : Dict = mask_time_min_masks
__a : List[Any] = mask_feature_prob
__a : Tuple = mask_feature_length
__a : int = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__a : List[Any] = num_codevectors_per_group
__a : Union[str, Any] = num_codevector_groups
__a : List[Any] = contrastive_logits_temperature
__a : Any = feat_quantizer_dropout
__a : Optional[int] = num_negatives
__a : List[str] = codevector_dim
__a : List[Any] = proj_codevector_dim
__a : Tuple = diversity_loss_weight
# ctc loss
__a : Any = ctc_loss_reduction
__a : List[str] = ctc_zero_infinity
# pretraining loss
__a : Tuple = replace_prob
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 63
| 1
|
'''simple docstring'''
import math
import flax.linen as nn
import jax.numpy as jnp
def _lowerCAmelCase ( __magic_name__ : jnp.ndarray , __magic_name__ : int , __magic_name__ : float = 1 , __magic_name__ : float = 1 , __magic_name__ : float = 1.0E4 , __magic_name__ : bool = False , __magic_name__ : float = 1.0 , ) -> jnp.ndarray:
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, f'''Embedding dimension {embedding_dim} should be even'''
lowercase : int =float(embedding_dim // 2 )
lowercase : Optional[int] =math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
lowercase : Any =min_timescale * jnp.exp(jnp.arange(__magic_name__ , dtype=jnp.floataa ) * -log_timescale_increment )
lowercase : List[Any] =jnp.expand_dims(__magic_name__ , 1 ) * jnp.expand_dims(__magic_name__ , 0 )
# scale embeddings
lowercase : Tuple =scale * emb
if flip_sin_to_cos:
lowercase : Dict =jnp.concatenate([jnp.cos(__magic_name__ ), jnp.sin(__magic_name__ )] , axis=1 )
else:
lowercase : Any =jnp.concatenate([jnp.sin(__magic_name__ ), jnp.cos(__magic_name__ )] , axis=1 )
lowercase : List[str] =jnp.reshape(__magic_name__ , [jnp.shape(__magic_name__ )[0], embedding_dim] )
return signal
class __SCREAMING_SNAKE_CASE ( nn.Module ):
lowerCamelCase_ = 32
lowerCamelCase_ = jnp.floataa
@nn.compact
def __call__( self : Tuple , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : List[Any] =nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(UpperCAmelCase__ )
lowercase : Any =nn.silu(UpperCAmelCase__ )
lowercase : int =nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(UpperCAmelCase__ )
return temb
class __SCREAMING_SNAKE_CASE ( nn.Module ):
lowerCamelCase_ = 32
lowerCamelCase_ = False
lowerCamelCase_ = 1
@nn.compact
def __call__( self : int , UpperCAmelCase__ : str ):
'''simple docstring'''
return get_sinusoidal_embeddings(
UpperCAmelCase__ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
| 92
|
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self , **lowerCAmelCase ) -> Tuple:
'''simple docstring'''
super().__init__(**lowerCAmelCase )
if self.framework != "pt":
raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
# No specific FOR_XXX available yet
def __call__( self , lowerCAmelCase , **lowerCAmelCase ) -> int:
'''simple docstring'''
return super().__call__(lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , **lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
_lowercase ={}
if "candidate_labels" in kwargs:
_lowercase =kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
_lowercase =kwargs['hypothesis_template']
return preprocess_params, {}, {}
def A__ ( self , lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase="This is a sound of {}." ) -> Tuple:
'''simple docstring'''
if isinstance(lowerCAmelCase , lowerCAmelCase ):
if audio.startswith('http://' ) or audio.startswith('https://' ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
_lowercase =requests.get(lowerCAmelCase ).content
else:
with open(lowerCAmelCase , 'rb' ) as f:
_lowercase =f.read()
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_lowercase =ffmpeg_read(lowerCAmelCase , self.feature_extractor.sampling_rate )
if not isinstance(lowerCAmelCase , np.ndarray ):
raise ValueError('We expect a numpy ndarray as input' )
if len(audio.shape ) != 1:
raise ValueError('We expect a single channel audio input for ZeroShotAudioClassificationPipeline' )
_lowercase =self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='pt' )
_lowercase =candidate_labels
_lowercase =[hypothesis_template.format(lowerCAmelCase ) for x in candidate_labels]
_lowercase =self.tokenizer(lowerCAmelCase , return_tensors=self.framework , padding=lowerCAmelCase )
_lowercase =[text_inputs]
return inputs
def A__ ( self , lowerCAmelCase ) -> Dict:
'''simple docstring'''
_lowercase =model_inputs.pop('candidate_labels' )
_lowercase =model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , lowerCAmelCase ):
_lowercase =text_inputs[0]
else:
# Batching case.
_lowercase =text_inputs[0][0]
_lowercase =self.model(**lowerCAmelCase , **lowerCAmelCase )
_lowercase ={
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_audio,
}
return model_outputs
def A__ ( self , lowerCAmelCase ) -> Tuple:
'''simple docstring'''
_lowercase =model_outputs.pop('candidate_labels' )
_lowercase =model_outputs['logits'][0]
if self.framework == "pt":
_lowercase =logits.softmax(dim=0 )
_lowercase =probs.tolist()
else:
raise ValueError('`tf` framework not supported.' )
_lowercase =[
{'score': score, 'label': candidate_label}
for score, candidate_label in sorted(zip(lowerCAmelCase , lowerCAmelCase ) , key=lambda lowerCAmelCase : -x[0] )
]
return result
| 291
| 0
|
def lowerCamelCase__ ( a : list ) -> list:
"""simple docstring"""
if len(a ) <= 1:
return lst
a__ :int = 1
while i < len(a ):
if lst[i - 1] <= lst[i]:
i += 1
else:
a__ , a__ :List[Any] = lst[i], lst[i - 1]
i -= 1
if i == 0:
a__ :str = 1
return lst
if __name__ == "__main__":
snake_case__ = input('''Enter numbers separated by a comma:\n''').strip()
snake_case__ = [int(item) for item in user_input.split(''',''')]
print(gnome_sort(unsorted))
| 373
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
snake_case__ = False
class lowerCAmelCase_ ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase):
def _snake_case ( self : Tuple ) ->Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self : Dict ) ->Any:
"""simple docstring"""
a__ :Union[str, Any] = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
a__ :List[Any] = "A painting of a squirrel eating a burger "
a__ :Optional[Any] = torch.manual_seed(0 )
a__ :List[Any] = pipe(
prompt=__A , generator=__A , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__A )
a__ :List[Any] = VersatileDiffusionTextToImagePipeline.from_pretrained(__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
a__ :Optional[int] = generator.manual_seed(0 )
a__ :List[Any] = pipe(
prompt=__A , generator=__A , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def _snake_case ( self : Optional[Any] ) ->List[Any]:
"""simple docstring"""
a__ :Tuple = VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
a__ :Tuple = "A painting of a squirrel eating a burger "
a__ :Tuple = torch.manual_seed(0 )
a__ :Optional[Any] = pipe(
prompt=__A , generator=__A , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
a__ :Tuple = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
a__ :Tuple = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 373
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class _snake_case :
def __init__( self , a__ , a__ ) -> None:
'''simple docstring'''
if len(a__ ) != degree + 1:
raise ValueError(
"The number of coefficients should be equal to the degree + 1." )
snake_case_ = list(a__ )
snake_case_ = degree
def __add__( self , a__ ) -> Polynomial:
'''simple docstring'''
if self.degree > polynomial_a.degree:
snake_case_ = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , a__ )
else:
snake_case_ = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , a__ )
def __sub__( self , a__ ) -> Polynomial:
'''simple docstring'''
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self ) -> Polynomial:
'''simple docstring'''
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self , a__ ) -> Polynomial:
'''simple docstring'''
snake_case_ = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , a__ )
def lowerCAmelCase__ ( self , a__ ) -> int | float:
'''simple docstring'''
snake_case_ = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self ) -> str:
'''simple docstring'''
snake_case_ = ""
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(a__ )
return polynomial
def __repr__( self ) -> str:
'''simple docstring'''
return self.__str__()
def lowerCAmelCase__ ( self ) -> Polynomial:
'''simple docstring'''
snake_case_ = [0] * self.degree
for i in range(self.degree ):
snake_case_ = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , a__ )
def lowerCAmelCase__ ( self , a__ = 0 ) -> Polynomial:
'''simple docstring'''
snake_case_ = [0] * (self.degree + 2)
snake_case_ = constant
for i in range(self.degree + 1 ):
snake_case_ = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , a__ )
def __eq__( self , a__ ) -> bool:
'''simple docstring'''
if not isinstance(a__ , a__ ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self , a__ ) -> bool:
'''simple docstring'''
return not self.__eq__(a__ )
| 400
|
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : List[str] = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}
_SCREAMING_SNAKE_CASE : Tuple = {
"vocab_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
},
"emoji_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
},
}
_SCREAMING_SNAKE_CASE : int = {
"abeja/gpt-neox-japanese-2.7b": 2048,
}
def UpperCamelCase_( snake_case : List[Any] , snake_case : List[str] ):
'''simple docstring'''
with open(snake_case , "r" , encoding="utf-8" ) as f:
snake_case_ = json.loads(f.read() )
snake_case_ = collections.OrderedDict()
snake_case_ = collections.OrderedDict()
snake_case_ = collections.OrderedDict()
with open(snake_case , "r" , encoding="utf-8" ) as f:
snake_case_ = f.readlines()
snake_case_ = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(snake_case ):
snake_case_ = b
snake_case_ = idx
for wd in b:
snake_case_ = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : Union[str, Any] = VOCAB_FILES_NAMES
lowerCAmelCase_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : int = ["input_ids", "attention_mask"]
def __init__( self , a__ , a__ , a__="<|endoftext|>" , a__="<|endoftext|>" , a__="<|startoftext|>" , a__="<|endoftext|>" , a__=False , **a__ , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
unk_token=a__ , pad_token=a__ , bos_token=a__ , eos_token=a__ , do_clean_text=a__ , **a__ , )
if not os.path.isfile(a__ ):
raise ValueError(
F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
if not os.path.isfile(a__ ):
raise ValueError(
F'Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
snake_case_ = do_clean_text
snake_case_ , snake_case_ , snake_case_ , snake_case_ = load_vocab_and_emoji(a__ , a__ )
snake_case_ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
return len(self.raw_vocab )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder )
def lowerCAmelCase__ ( self , a__ ) -> Any:
'''simple docstring'''
return self.subword_tokenizer.tokenize(a__ , clean=self.do_clean_text )
def lowerCAmelCase__ ( self , a__ ) -> List[Any]:
'''simple docstring'''
return self.vocab.get(a__ , self.vocab.get(self.unk_token ) )
def lowerCAmelCase__ ( self , a__ ) -> List[Any]:
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(a__ )
def lowerCAmelCase__ ( self , a__ ) -> Any:
'''simple docstring'''
snake_case_ = "".join(a__ ).strip()
return out_string
def lowerCAmelCase__ ( self , a__ ) -> List[int]:
'''simple docstring'''
snake_case_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a__ , add_special_tokens=a__ ) + [self.eos_token_id] )
if len(a__ ) > self.model_max_length:
snake_case_ = input_ids[-self.model_max_length :]
return input_ids
def lowerCAmelCase__ ( self , a__ , a__ = None ) -> Tuple[str]:
'''simple docstring'''
snake_case_ = 0
if os.path.isdir(a__ ):
snake_case_ = os.path.join(
a__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
snake_case_ = os.path.join(
a__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
else:
snake_case_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
snake_case_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(a__ , "w" , encoding="utf-8" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
" Please check that the vocabulary is not corrupted!" )
snake_case_ = token_index
writer.write(",".join(a__ ) + "\n" )
index += 1
with open(a__ , "w" , encoding="utf-8" ) as writer:
json.dump(self.emoji , a__ )
return vocab_file, emoji_file
class _snake_case ( lowercase_ ):
def __init__( self , a__ , a__ , a__ ) -> Any:
'''simple docstring'''
snake_case_ = vocab # same as swe
snake_case_ = ids_to_tokens # same as bpe
snake_case_ = emoji
snake_case_ = np.max([len(a__ ) for w in self.vocab.keys()] )
snake_case_ = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
snake_case_ = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
snake_case_ = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
snake_case_ = re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
snake_case_ = re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
snake_case_ = re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
snake_case_ = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
snake_case_ = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
snake_case_ = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__( self ) -> List[Any]:
'''simple docstring'''
return len(self.ids_to_tokens )
def lowerCAmelCase__ ( self , a__ ) -> str:
'''simple docstring'''
snake_case_ = self.content_repattera.sub("<URL>" , a__ )
snake_case_ = self.content_repattera.sub("<EMAIL>" , a__ )
snake_case_ = self.content_repattera.sub("<TEL>" , a__ )
snake_case_ = self.content_repattera.sub("<DATE>" , a__ )
snake_case_ = self.content_repattera.sub("<DATE>" , a__ )
snake_case_ = self.content_repattera.sub("<PRICE>" , a__ )
snake_case_ = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
snake_case_ = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
return content
def lowerCAmelCase__ ( self , a__ , a__=False ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = text.replace(" " , "<SP>" )
snake_case_ = text.replace(" " , "<SP>" )
snake_case_ = text.replace("\r\n" , "<BR>" )
snake_case_ = text.replace("\n" , "<BR>" )
snake_case_ = text.replace("\r" , "<BR>" )
snake_case_ = text.replace("\t" , "<TAB>" )
snake_case_ = text.replace("—" , "ー" )
snake_case_ = text.replace("−" , "ー" )
for k, v in self.emoji["emoji"].items():
if k in text:
snake_case_ = text.replace(a__ , a__ )
if clean:
snake_case_ = self.clean_text(a__ )
def check_simbol(a__ ):
snake_case_ = x.encode()
if len(a__ ) == 1 and len(a__ ) == 2:
snake_case_ = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0XC_2_A_1 and c <= 0XC_2_B_F)
or (c >= 0XC_7_8_0 and c <= 0XC_7_8_3)
or (c >= 0XC_A_B_9 and c <= 0XC_B_B_F)
or (c >= 0XC_C_8_0 and c <= 0XC_D_A_2)
):
return True
return False
def checkuae(a__ ):
snake_case_ = x.encode()
if len(a__ ) == 1 and len(a__ ) == 3:
snake_case_ = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0XE_2_8_0_8_0 and c <= 0XE_2_B_0_7_F:
return True
return False
snake_case_ = 0
snake_case_ = []
while pos < len(a__ ):
snake_case_ = min(len(a__ ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
snake_case_ = [] # (token_id, token, pos)
for e in range(a__ , a__ , -1 ):
snake_case_ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(a__ ) > 2:
snake_case_ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(a__ ) > 0:
# the smallest token_id is adopted
snake_case_ , snake_case_ , snake_case_ = sorted(a__ , key=lambda a__ : x[0] )[0]
result.append(a__ )
snake_case_ = e
else:
snake_case_ = pos + 1
snake_case_ = text[pos:end]
if check_simbol(a__ ):
result.append("<KIGOU>" )
elif checkuae(a__ ):
result.append("<U2000U2BFF>" )
else:
for i in wd.encode("utf-8" ):
result.append("<|byte%d|>" % i )
snake_case_ = end
return result
def lowerCAmelCase__ ( self , a__ , a__="\n" ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = []
snake_case_ = []
snake_case_ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(a__ ) > 0:
words.append(bytearray(a__ ).decode("utf-8" , errors="replace" ) )
snake_case_ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word] )
elif word == "<SP>":
words.append(" " )
elif word == "<BR>":
words.append(a__ )
elif word == "<TAB>":
words.append("\t" )
elif word == "<BLOCK>":
words.append("▀" )
elif word == "<KIGOU>":
words.append("ǀ" )
elif word == "<U2000U2BFF>":
words.append("‖" )
else:
words.append(a__ )
if len(a__ ) > 0:
words.append(bytearray(a__ ).decode("utf-8" , errors="replace" ) )
snake_case_ = "".join(a__ )
return text
| 400
| 1
|
"""simple docstring"""
import argparse
import datetime
def a__ ( snake_case__ ) -> str:
lowerCamelCase = {
"""0""": """Sunday""",
"""1""": """Monday""",
"""2""": """Tuesday""",
"""3""": """Wednesday""",
"""4""": """Thursday""",
"""5""": """Friday""",
"""6""": """Saturday""",
}
lowerCamelCase = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
if not 0 < len(snake_case__ ) < 11:
raise ValueError("""Must be 10 characters long""" )
# Get month
lowerCamelCase = int(date_input[0] + date_input[1] )
# Validate
if not 0 < m < 13:
raise ValueError("""Month must be between 1 - 12""" )
lowerCamelCase = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("""Date separator must be '-' or '/'""" )
# Get day
lowerCamelCase = int(date_input[3] + date_input[4] )
# Validate
if not 0 < d < 32:
raise ValueError("""Date must be between 1 - 31""" )
# Get second separator
lowerCamelCase = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("""Date separator must be '-' or '/'""" )
# Get year
lowerCamelCase = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
# Arbitrary year range
if not 45 < y < 85_00:
raise ValueError(
"""Year out of range. There has to be some sort of limit...right?""" )
# Get datetime obj for validation
lowerCamelCase = datetime.date(int(snake_case__ ) , int(snake_case__ ) , int(snake_case__ ) )
# Start math
if m <= 2:
lowerCamelCase = y - 1
lowerCamelCase = m + 12
# maths var
lowerCamelCase = int(str(snake_case__ )[:2] )
lowerCamelCase = int(str(snake_case__ )[2:] )
lowerCamelCase = int(2.6 * m - 5.39 )
lowerCamelCase = int(c / 4 )
lowerCamelCase = int(k / 4 )
lowerCamelCase = int(d + k )
lowerCamelCase = int(t + u + v + x )
lowerCamelCase = int(z - (2 * c) )
lowerCamelCase = round(w % 7 )
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError("""The date was evaluated incorrectly. Contact developer.""" )
# Response
lowerCamelCase = F'Your date {date_input}, is a {days[str(snake_case__ )]}!'
return response
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase : str = argparse.ArgumentParser(
description=(
"""Find out what day of the week nearly any date is or was. Enter """
"""date as a string in the mm-dd-yyyy or mm/dd/yyyy format"""
)
)
parser.add_argument(
"""date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)"""
)
lowerCAmelCase : Dict = parser.parse_args()
zeller(args.date_input)
| 705
|
"""simple docstring"""
def a__ ( snake_case__ = 1_00_00_00 ) -> int:
lowerCamelCase = 1
lowerCamelCase = 1
lowerCamelCase = {1: 1}
for inputa in range(2 , snake_case__ ):
lowerCamelCase = 0
lowerCamelCase = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
lowerCamelCase = (3 * number) + 1
counter += 1
if inputa not in counters:
lowerCamelCase = counter
if counter > pre_counter:
lowerCamelCase = inputa
lowerCamelCase = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 533
| 0
|
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 84
|
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
snake_case_ = BertConfig.from_json_file(UpperCamelCase__ )
print(F'''Building PyTorch model from configuration: {config}''' )
snake_case_ = BertForPreTraining(UpperCamelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_bert(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , UpperCamelCase__ )
if __name__ == "__main__":
_UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_UpperCAmelCase : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 362
| 0
|
'''simple docstring'''
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def lowerCAmelCase_ ( _lowerCamelCase: float , _lowerCamelCase: float , _lowerCamelCase: bool = False ):
if radian_mode:
return [magnitude * cos(_lowerCamelCase ), magnitude * sin(_lowerCamelCase )]
return [magnitude * cos(radians(_lowerCamelCase ) ), magnitude * sin(radians(_lowerCamelCase ) )]
def lowerCAmelCase_ ( _lowerCamelCase: NDArray[floataa] , _lowerCamelCase: NDArray[floataa] , _lowerCamelCase: float = 10**-1 ):
__SCREAMING_SNAKE_CASE : NDArray[floataa] = cross(_lowerCamelCase , _lowerCamelCase )
__SCREAMING_SNAKE_CASE : float = sum(_lowerCamelCase )
return abs(_lowerCamelCase ) < eps
if __name__ == "__main__":
# Test to check if it works
UpperCamelCase__ : List[str] = array(
[
polar_force(7_1_8.4, 1_80 - 30),
polar_force(8_7_9.5_4, 45),
polar_force(1_00, -90),
]
)
UpperCamelCase__ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
UpperCamelCase__ : List[Any] = array(
[
polar_force(30 * 9.8_1, 15),
polar_force(2_15, 1_80 - 45),
polar_force(2_64, 90 - 30),
]
)
UpperCamelCase__ : Union[str, Any] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
UpperCamelCase__ : int = array([[0, -20_00], [0, -12_00], [0, 1_56_00], [0, -1_24_00]])
UpperCamelCase__ : Optional[Any] = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 178
|
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
UpperCamelCase__ : Optional[int] = {
'''facebook/maskformer-swin-base-ade''': (
'''https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'''
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
_A : Optional[Any] = '''maskformer'''
_A : Optional[int] = {'''hidden_size''': '''mask_feature_size'''}
_A : int = ['''resnet''', '''swin''']
_A : Any = ['''detr''']
def __init__( self : Any , lowerCAmelCase__ : int = 2_5_6 , lowerCAmelCase__ : int = 2_5_6 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : Optional[Dict] = None , lowerCAmelCase__ : Optional[Dict] = None , lowerCAmelCase__ : float = 0.02 , lowerCAmelCase__ : float = 1.0 , lowerCAmelCase__ : float = 1.0 , lowerCAmelCase__ : float = 1.0 , lowerCAmelCase__ : float = 20.0 , lowerCAmelCase__ : Optional[bool] = None , **lowerCAmelCase__ : Any , ):
"""simple docstring"""
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
__SCREAMING_SNAKE_CASE : Any = SwinConfig(
image_size=3_8_4 , in_channels=3 , patch_size=4 , embed_dim=1_2_8 , depths=[2, 2, 1_8, 2] , num_heads=[4, 8, 1_6, 3_2] , window_size=1_2 , drop_path_rate=0.3 , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Dict = backbone_config.pop("""model_type""" )
__SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAPPING[backbone_model_type]
__SCREAMING_SNAKE_CASE : Union[str, Any] = config_class.from_dict(lowerCAmelCase__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
F"Supported model types: {','.join(self.backbones_supported )}" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
__SCREAMING_SNAKE_CASE : List[str] = DetrConfig()
else:
# verify that the decoder is supported
__SCREAMING_SNAKE_CASE : List[Any] = (
decoder_config.pop("""model_type""" ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
F"Transformer Decoder {decoder_type} not supported, please use one of"
F" {','.join(self.decoders_supported )}" )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : int = CONFIG_MAPPING[decoder_type]
__SCREAMING_SNAKE_CASE : Dict = config_class.from_dict(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = backbone_config
__SCREAMING_SNAKE_CASE : Optional[int] = decoder_config
# main feature dimension for the model
__SCREAMING_SNAKE_CASE : List[str] = fpn_feature_size
__SCREAMING_SNAKE_CASE : int = mask_feature_size
# initializer
__SCREAMING_SNAKE_CASE : str = init_std
__SCREAMING_SNAKE_CASE : Optional[Any] = init_xavier_std
# Hungarian matcher && loss
__SCREAMING_SNAKE_CASE : Tuple = cross_entropy_weight
__SCREAMING_SNAKE_CASE : Dict = dice_weight
__SCREAMING_SNAKE_CASE : Union[str, Any] = mask_weight
__SCREAMING_SNAKE_CASE : Optional[Any] = use_auxiliary_loss
__SCREAMING_SNAKE_CASE : Union[str, Any] = no_object_weight
__SCREAMING_SNAKE_CASE : Dict = output_auxiliary_logits
__SCREAMING_SNAKE_CASE : Any = self.decoder_config.encoder_attention_heads
__SCREAMING_SNAKE_CASE : int = self.decoder_config.num_hidden_layers
super().__init__(**lowerCAmelCase__ )
@classmethod
def UpperCamelCase__ ( cls : List[Any] , lowerCAmelCase__ : PretrainedConfig , lowerCAmelCase__ : PretrainedConfig , **lowerCAmelCase__ : Optional[int] ):
"""simple docstring"""
return cls(
backbone_config=lowerCAmelCase__ , decoder_config=lowerCAmelCase__ , **lowerCAmelCase__ , )
def UpperCamelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = copy.deepcopy(self.__dict__ )
__SCREAMING_SNAKE_CASE : Dict = self.backbone_config.to_dict()
__SCREAMING_SNAKE_CASE : str = self.decoder_config.to_dict()
__SCREAMING_SNAKE_CASE : List[str] = self.__class__.model_type
return output
| 178
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ (__snake_case ):
__lowerCamelCase : Dict = """luke"""
def __init__( self , a=5_0267 , a=50_0000 , a=768 , a=256 , a=12 , a=12 , a=3072 , a="gelu" , a=0.1 , a=0.1 , a=512 , a=2 , a=0.02 , a=1e-12 , a=True , a=None , a=1 , a=0 , a=2 , **a , ):
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a)
lowercase__ : Tuple = vocab_size
lowercase__ : Optional[Any] = entity_vocab_size
lowercase__ : Optional[int] = hidden_size
lowercase__ : List[str] = entity_emb_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : Any = num_attention_heads
lowercase__ : List[str] = hidden_act
lowercase__ : Any = intermediate_size
lowercase__ : str = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : Dict = type_vocab_size
lowercase__ : Tuple = initializer_range
lowercase__ : Any = layer_norm_eps
lowercase__ : Optional[Any] = use_entity_aware_attention
lowercase__ : Union[str, Any] = classifier_dropout
| 164
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class SCREAMING_SNAKE_CASE__ (unittest.TestCase ):
def snake_case_ ( self):
lowercase__ : int = inspect.getfile(accelerate.test_utils)
lowercase__ : Dict = os.path.sep.join(
mod_file.split(os.path.sep)[:-1] + ['scripts', 'external_deps', 'test_metrics.py'])
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
lowercase__ : Optional[Any] = test_metrics
@require_cpu
def snake_case_ ( self):
debug_launcher(self.test_metrics.main , num_processes=1)
@require_cpu
def snake_case_ ( self):
debug_launcher(self.test_metrics.main)
@require_single_gpu
def snake_case_ ( self):
self.test_metrics.main()
@require_multi_gpu
def snake_case_ ( self):
print(f"""Found {torch.cuda.device_count()} devices.""")
lowercase__ : int = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(a , env=os.environ.copy())
| 164
| 1
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def lowercase_ ( SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
snake_case__ : int =int(number**0.5 )
return number == sq * sq
def lowercase_ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
snake_case__ : int =x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
snake_case__ : int =x_den * y_den * z_den
snake_case__ : int =gcd(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
top //= hcf
bottom //= hcf
return top, bottom
def lowercase_ ( SCREAMING_SNAKE_CASE : int = 35 ):
"""simple docstring"""
snake_case__ : set =set()
snake_case__ : int
snake_case__ : Fraction =Fraction(0 )
snake_case__ : tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
snake_case__ : List[str] =x_num * y_den + x_den * y_num
snake_case__ : Any =x_den * y_den
snake_case__ : Dict =gcd(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
snake_case__ : Optional[int] =add_three(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
unique_s.add(SCREAMING_SNAKE_CASE )
# n=2
snake_case__ : Optional[Any] =(
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
snake_case__ : Optional[int] =x_den * x_den * y_den * y_den
if is_sq(SCREAMING_SNAKE_CASE ) and is_sq(SCREAMING_SNAKE_CASE ):
snake_case__ : Any =int(sqrt(SCREAMING_SNAKE_CASE ) )
snake_case__ : Tuple =int(sqrt(SCREAMING_SNAKE_CASE ) )
snake_case__ : Optional[int] =gcd(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
snake_case__ : str =add_three(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
unique_s.add(SCREAMING_SNAKE_CASE )
# n=-1
snake_case__ : str =x_num * y_num
snake_case__ : str =x_den * y_num + x_num * y_den
snake_case__ : int =gcd(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
snake_case__ : List[Any] =add_three(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
unique_s.add(SCREAMING_SNAKE_CASE )
# n=2
snake_case__ : Dict =x_num * x_num * y_num * y_num
snake_case__ : str =(
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(SCREAMING_SNAKE_CASE ) and is_sq(SCREAMING_SNAKE_CASE ):
snake_case__ : Dict =int(sqrt(SCREAMING_SNAKE_CASE ) )
snake_case__ : Optional[Any] =int(sqrt(SCREAMING_SNAKE_CASE ) )
snake_case__ : int =gcd(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
snake_case__ : Dict =add_three(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
unique_s.add(SCREAMING_SNAKE_CASE )
for num, den in unique_s:
total += Fraction(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""")
| 701
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def lowercase_ ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
snake_case__ : Optional[int] =DPTConfig()
if "large" in checkpoint_url:
snake_case__ : Any =10_24
snake_case__ : Tuple =40_96
snake_case__ : List[Any] =24
snake_case__ : Tuple =16
snake_case__ : List[Any] =[5, 11, 17, 23]
snake_case__ : int =[2_56, 5_12, 10_24, 10_24]
snake_case__ : str =(1, 3_84, 3_84)
if "ade" in checkpoint_url:
snake_case__ : Optional[Any] =True
snake_case__ : Optional[Any] =1_50
snake_case__ : int ='''huggingface/label-files'''
snake_case__ : Union[str, Any] ='''ade20k-id2label.json'''
snake_case__ : int =json.load(open(cached_download(hf_hub_url(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) ) , '''r''' ) )
snake_case__ : int ={int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
snake_case__ : List[str] =idalabel
snake_case__ : List[str] ={v: k for k, v in idalabel.items()}
snake_case__ : List[Any] =[1, 1_50, 4_80, 4_80]
return config, expected_shape
def lowercase_ ( SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
snake_case__ : List[Any] =['''pretrained.model.head.weight''', '''pretrained.model.head.bias''']
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowercase_ ( SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
snake_case__ : Optional[Any] =name.replace('''pretrained.model''' , '''dpt.encoder''' )
if "pretrained.model" in name:
snake_case__ : Dict =name.replace('''pretrained.model''' , '''dpt.embeddings''' )
if "patch_embed" in name:
snake_case__ : str =name.replace('''patch_embed''' , '''patch_embeddings''' )
if "pos_embed" in name:
snake_case__ : Dict =name.replace('''pos_embed''' , '''position_embeddings''' )
if "attn.proj" in name:
snake_case__ : int =name.replace('''attn.proj''' , '''attention.output.dense''' )
if "proj" in name and "project" not in name:
snake_case__ : List[Any] =name.replace('''proj''' , '''projection''' )
if "blocks" in name:
snake_case__ : List[Any] =name.replace('''blocks''' , '''layer''' )
if "mlp.fc1" in name:
snake_case__ : Optional[int] =name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
snake_case__ : List[str] =name.replace('''mlp.fc2''' , '''output.dense''' )
if "norm1" in name:
snake_case__ : Any =name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
snake_case__ : Any =name.replace('''norm2''' , '''layernorm_after''' )
if "scratch.output_conv" in name:
snake_case__ : str =name.replace('''scratch.output_conv''' , '''head''' )
if "scratch" in name:
snake_case__ : Any =name.replace('''scratch''' , '''neck''' )
if "layer1_rn" in name:
snake_case__ : int =name.replace('''layer1_rn''' , '''convs.0''' )
if "layer2_rn" in name:
snake_case__ : List[str] =name.replace('''layer2_rn''' , '''convs.1''' )
if "layer3_rn" in name:
snake_case__ : Union[str, Any] =name.replace('''layer3_rn''' , '''convs.2''' )
if "layer4_rn" in name:
snake_case__ : Dict =name.replace('''layer4_rn''' , '''convs.3''' )
if "refinenet" in name:
snake_case__ : List[str] =int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
snake_case__ : Optional[int] =name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
if "out_conv" in name:
snake_case__ : Tuple =name.replace('''out_conv''' , '''projection''' )
if "resConfUnit1" in name:
snake_case__ : Any =name.replace('''resConfUnit1''' , '''residual_layer1''' )
if "resConfUnit2" in name:
snake_case__ : Dict =name.replace('''resConfUnit2''' , '''residual_layer2''' )
if "conv1" in name:
snake_case__ : Dict =name.replace('''conv1''' , '''convolution1''' )
if "conv2" in name:
snake_case__ : List[str] =name.replace('''conv2''' , '''convolution2''' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
snake_case__ : List[Any] =name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' )
if "pretrained.act_postprocess2.0.project.0" in name:
snake_case__ : int =name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' )
if "pretrained.act_postprocess3.0.project.0" in name:
snake_case__ : Dict =name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' )
if "pretrained.act_postprocess4.0.project.0" in name:
snake_case__ : Union[str, Any] =name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
snake_case__ : int =name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' )
if "pretrained.act_postprocess1.4" in name:
snake_case__ : Union[str, Any] =name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' )
if "pretrained.act_postprocess2.3" in name:
snake_case__ : Tuple =name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' )
if "pretrained.act_postprocess2.4" in name:
snake_case__ : Any =name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' )
if "pretrained.act_postprocess3.3" in name:
snake_case__ : Tuple =name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' )
if "pretrained.act_postprocess4.3" in name:
snake_case__ : Any =name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' )
if "pretrained.act_postprocess4.4" in name:
snake_case__ : Dict =name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' )
if "pretrained" in name:
snake_case__ : Union[str, Any] =name.replace('''pretrained''' , '''dpt''' )
if "bn" in name:
snake_case__ : Any =name.replace('''bn''' , '''batch_norm''' )
if "head" in name:
snake_case__ : Optional[int] =name.replace('''head''' , '''head.head''' )
if "encoder.norm" in name:
snake_case__ : Optional[Any] =name.replace('''encoder.norm''' , '''layernorm''' )
if "auxlayer" in name:
snake_case__ : List[str] =name.replace('''auxlayer''' , '''auxiliary_head.head''' )
return name
def lowercase_ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case__ : Union[str, Any] =state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
snake_case__ : List[Any] =state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
snake_case__ : Union[str, Any] =in_proj_weight[: config.hidden_size, :]
snake_case__ : List[Any] =in_proj_bias[: config.hidden_size]
snake_case__ : List[str] =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case__ : int =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case__ : List[Any] =in_proj_weight[
-config.hidden_size :, :
]
snake_case__ : Any =in_proj_bias[-config.hidden_size :]
def lowercase_ ( ):
"""simple docstring"""
snake_case__ : Optional[Any] ='''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case__ : str =Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def lowercase_ ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
snake_case__, snake_case__ : int =get_dpt_config(SCREAMING_SNAKE_CASE )
# load original state_dict from URL
snake_case__ : List[str] =torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location='''cpu''' )
# remove certain keys
remove_ignore_keys_(SCREAMING_SNAKE_CASE )
# rename keys
for key in state_dict.copy().keys():
snake_case__ : List[Any] =state_dict.pop(SCREAMING_SNAKE_CASE )
snake_case__ : Any =val
# read in qkv matrices
read_in_q_k_v(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# load HuggingFace model
snake_case__ : Dict =DPTForSemanticSegmentation(SCREAMING_SNAKE_CASE ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
model.eval()
# Check outputs on an image
snake_case__ : Tuple =4_80 if '''ade''' in checkpoint_url else 3_84
snake_case__ : str =DPTImageProcessor(size=SCREAMING_SNAKE_CASE )
snake_case__ : List[str] =prepare_img()
snake_case__ : Dict =image_processor(SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
# forward pass
snake_case__ : Optional[int] =model(**SCREAMING_SNAKE_CASE ).logits if '''ade''' in checkpoint_url else model(**SCREAMING_SNAKE_CASE ).predicted_depth
# Assert logits
snake_case__ : Tuple =torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] )
if "ade" in checkpoint_url:
snake_case__ : Optional[int] =torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] )
assert outputs.shape == torch.Size(SCREAMING_SNAKE_CASE )
assert (
torch.allclose(outputs[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , SCREAMING_SNAKE_CASE )
)
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
print('''Pushing model to hub...''' )
model.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=SCREAMING_SNAKE_CASE , )
image_processor.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=SCREAMING_SNAKE_CASE , )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
lowerCamelCase__ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 408
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class __magic_name__ ( unittest.TestCase ):
@slow
def lowerCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
_UpperCAmelCase : Dict =AutoConfig.from_pretrained(snake_case)
self.assertIsNotNone(snake_case)
self.assertIsInstance(snake_case , snake_case)
_UpperCAmelCase : Any =TFAutoModel.from_pretrained(snake_case , from_pt=snake_case)
self.assertIsNotNone(snake_case)
self.assertIsInstance(snake_case , snake_case)
_UpperCAmelCase : Optional[int] =AutoModel.from_pretrained(snake_case , from_tf=snake_case)
self.assertIsNotNone(snake_case)
self.assertIsInstance(snake_case , snake_case)
@slow
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
_UpperCAmelCase : int =AutoConfig.from_pretrained(snake_case)
self.assertIsNotNone(snake_case)
self.assertIsInstance(snake_case , snake_case)
_UpperCAmelCase : Dict =TFAutoModelForPreTraining.from_pretrained(snake_case , from_pt=snake_case)
self.assertIsNotNone(snake_case)
self.assertIsInstance(snake_case , snake_case)
_UpperCAmelCase : List[str] =AutoModelForPreTraining.from_pretrained(snake_case , from_tf=snake_case)
self.assertIsNotNone(snake_case)
self.assertIsInstance(snake_case , snake_case)
@slow
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Optional[Any] =AutoConfig.from_pretrained(snake_case)
self.assertIsNotNone(snake_case)
self.assertIsInstance(snake_case , snake_case)
_UpperCAmelCase : List[Any] =TFAutoModelForCausalLM.from_pretrained(snake_case , from_pt=snake_case)
_UpperCAmelCase , _UpperCAmelCase : str =TFAutoModelForCausalLM.from_pretrained(
snake_case , output_loading_info=snake_case , from_pt=snake_case)
self.assertIsNotNone(snake_case)
self.assertIsInstance(snake_case , snake_case)
_UpperCAmelCase : List[str] =AutoModelForCausalLM.from_pretrained(snake_case , from_tf=snake_case)
_UpperCAmelCase , _UpperCAmelCase : List[Any] =AutoModelForCausalLM.from_pretrained(
snake_case , output_loading_info=snake_case , from_tf=snake_case)
self.assertIsNotNone(snake_case)
self.assertIsInstance(snake_case , snake_case)
@slow
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : str =AutoConfig.from_pretrained(snake_case)
self.assertIsNotNone(snake_case)
self.assertIsInstance(snake_case , snake_case)
_UpperCAmelCase : Any =TFAutoModelWithLMHead.from_pretrained(snake_case , from_pt=snake_case)
self.assertIsNotNone(snake_case)
self.assertIsInstance(snake_case , snake_case)
_UpperCAmelCase : List[str] =AutoModelWithLMHead.from_pretrained(snake_case , from_tf=snake_case)
self.assertIsNotNone(snake_case)
self.assertIsInstance(snake_case , snake_case)
@slow
def lowerCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Optional[int] =AutoConfig.from_pretrained(snake_case)
self.assertIsNotNone(snake_case)
self.assertIsInstance(snake_case , snake_case)
_UpperCAmelCase : Union[str, Any] =TFAutoModelForMaskedLM.from_pretrained(snake_case , from_pt=snake_case)
_UpperCAmelCase , _UpperCAmelCase : Tuple =TFAutoModelForMaskedLM.from_pretrained(
snake_case , output_loading_info=snake_case , from_pt=snake_case)
self.assertIsNotNone(snake_case)
self.assertIsInstance(snake_case , snake_case)
_UpperCAmelCase : Optional[Any] =AutoModelForMaskedLM.from_pretrained(snake_case , from_tf=snake_case)
_UpperCAmelCase , _UpperCAmelCase : Optional[int] =AutoModelForMaskedLM.from_pretrained(
snake_case , output_loading_info=snake_case , from_tf=snake_case)
self.assertIsNotNone(snake_case)
self.assertIsInstance(snake_case , snake_case)
@slow
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : str =AutoConfig.from_pretrained(snake_case)
self.assertIsNotNone(snake_case)
self.assertIsInstance(snake_case , snake_case)
_UpperCAmelCase : int =TFAutoModelForSeqaSeqLM.from_pretrained(snake_case , from_pt=snake_case)
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] =TFAutoModelForSeqaSeqLM.from_pretrained(
snake_case , output_loading_info=snake_case , from_pt=snake_case)
self.assertIsNotNone(snake_case)
self.assertIsInstance(snake_case , snake_case)
_UpperCAmelCase : Dict =AutoModelForSeqaSeqLM.from_pretrained(snake_case , from_tf=snake_case)
_UpperCAmelCase , _UpperCAmelCase : int =AutoModelForSeqaSeqLM.from_pretrained(
snake_case , output_loading_info=snake_case , from_tf=snake_case)
self.assertIsNotNone(snake_case)
self.assertIsInstance(snake_case , snake_case)
@slow
def lowerCAmelCase ( self) -> List[Any]:
'''simple docstring'''
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
_UpperCAmelCase : List[str] =AutoConfig.from_pretrained(snake_case)
self.assertIsNotNone(snake_case)
self.assertIsInstance(snake_case , snake_case)
_UpperCAmelCase : Dict =TFAutoModelForSequenceClassification.from_pretrained(snake_case , from_pt=snake_case)
self.assertIsNotNone(snake_case)
self.assertIsInstance(snake_case , snake_case)
_UpperCAmelCase : str =AutoModelForSequenceClassification.from_pretrained(snake_case , from_tf=snake_case)
self.assertIsNotNone(snake_case)
self.assertIsInstance(snake_case , snake_case)
@slow
def lowerCAmelCase ( self) -> Dict:
'''simple docstring'''
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
_UpperCAmelCase : str =AutoConfig.from_pretrained(snake_case)
self.assertIsNotNone(snake_case)
self.assertIsInstance(snake_case , snake_case)
_UpperCAmelCase : Union[str, Any] =TFAutoModelForQuestionAnswering.from_pretrained(snake_case , from_pt=snake_case)
self.assertIsNotNone(snake_case)
self.assertIsInstance(snake_case , snake_case)
_UpperCAmelCase : Optional[Any] =AutoModelForQuestionAnswering.from_pretrained(snake_case , from_tf=snake_case)
self.assertIsNotNone(snake_case)
self.assertIsInstance(snake_case , snake_case)
def lowerCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCAmelCase : Dict =TFAutoModelWithLMHead.from_pretrained(snake_case , from_pt=snake_case)
self.assertIsInstance(snake_case , snake_case)
self.assertEqual(model.num_parameters() , 1_4_4_1_0)
self.assertEqual(model.num_parameters(only_trainable=snake_case) , 1_4_4_1_0)
_UpperCAmelCase : List[str] =AutoModelWithLMHead.from_pretrained(snake_case , from_tf=snake_case)
self.assertIsInstance(snake_case , snake_case)
self.assertEqual(model.num_parameters() , 1_4_4_1_0)
self.assertEqual(model.num_parameters(only_trainable=snake_case) , 1_4_4_1_0)
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] =TFAutoModelWithLMHead.from_pretrained(snake_case , from_pt=snake_case)
self.assertIsInstance(snake_case , snake_case)
self.assertEqual(model.num_parameters() , 1_4_4_1_0)
self.assertEqual(model.num_parameters(only_trainable=snake_case) , 1_4_4_1_0)
_UpperCAmelCase : Optional[Any] =AutoModelWithLMHead.from_pretrained(snake_case , from_tf=snake_case)
self.assertIsInstance(snake_case , snake_case)
self.assertEqual(model.num_parameters() , 1_4_4_1_0)
self.assertEqual(model.num_parameters(only_trainable=snake_case) , 1_4_4_1_0)
| 446
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __magic_name__ :
def __init__( self , snake_case , snake_case=2 , snake_case=True , snake_case=False , snake_case=1_0 , snake_case=3 , snake_case=3_2 * 8 , snake_case=3_2 * 8 , snake_case=4 , snake_case=6_4 , ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : List[Any] =parent
_UpperCAmelCase : Optional[int] =batch_size
_UpperCAmelCase : List[str] =is_training
_UpperCAmelCase : Union[str, Any] =use_auxiliary_loss
_UpperCAmelCase : Dict =num_queries
_UpperCAmelCase : Tuple =num_channels
_UpperCAmelCase : Optional[Any] =min_size
_UpperCAmelCase : Any =max_size
_UpperCAmelCase : Optional[int] =num_labels
_UpperCAmelCase : Optional[int] =hidden_dim
_UpperCAmelCase : Dict =hidden_dim
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : List[str] =floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
snake_case)
_UpperCAmelCase : List[str] =torch.ones([self.batch_size, self.min_size, self.max_size] , device=snake_case)
_UpperCAmelCase : int =(
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=snake_case) > 0.5
).float()
_UpperCAmelCase : Union[str, Any] =(torch.rand((self.batch_size, self.num_labels) , device=snake_case) > 0.5).long()
_UpperCAmelCase : Tuple =self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] =MaskaFormerConfig(
hidden_size=self.hidden_dim , )
_UpperCAmelCase : int =self.num_queries
_UpperCAmelCase : int =self.num_labels
_UpperCAmelCase : List[Any] =[1, 1, 1, 1]
_UpperCAmelCase : int =self.num_channels
_UpperCAmelCase : List[Any] =6_4
_UpperCAmelCase : Optional[Any] =1_2_8
_UpperCAmelCase : List[Any] =self.hidden_dim
_UpperCAmelCase : Dict =self.hidden_dim
_UpperCAmelCase : int =self.hidden_dim
return config
def lowerCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] =self.prepare_config_and_inputs()
_UpperCAmelCase : Union[str, Any] ={'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def lowerCAmelCase ( self , snake_case , snake_case) -> str:
'''simple docstring'''
_UpperCAmelCase : int =output.encoder_hidden_states
_UpperCAmelCase : Dict =output.pixel_decoder_hidden_states
_UpperCAmelCase : List[Any] =output.transformer_decoder_hidden_states
self.parent.assertTrue(len(snake_case) , len(config.backbone_config.depths))
self.parent.assertTrue(len(snake_case) , len(config.backbone_config.depths))
self.parent.assertTrue(len(snake_case) , config.decoder_layers)
def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case=False) -> Any:
'''simple docstring'''
with torch.no_grad():
_UpperCAmelCase : Tuple =MaskaFormerModel(config=snake_case)
model.to(snake_case)
model.eval()
_UpperCAmelCase : List[Any] =model(pixel_values=snake_case , pixel_mask=snake_case)
_UpperCAmelCase : Union[str, Any] =model(snake_case , output_hidden_states=snake_case)
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(output.encoder_last_hidden_state is not None)
if output_hidden_states:
self.check_output_hidden_state(snake_case , snake_case)
def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Dict =MaskaFormerForUniversalSegmentation(config=snake_case)
model.to(snake_case)
model.eval()
def comm_check_on_output(snake_case):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.encoder_last_hidden_state is not None)
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1))
with torch.no_grad():
_UpperCAmelCase : Optional[Any] =model(pixel_values=snake_case , pixel_mask=snake_case)
_UpperCAmelCase : Optional[Any] =model(snake_case)
comm_check_on_output(snake_case)
_UpperCAmelCase : str =model(
pixel_values=snake_case , pixel_mask=snake_case , mask_labels=snake_case , class_labels=snake_case)
comm_check_on_output(snake_case)
self.parent.assertTrue(result.loss is not None)
self.parent.assertEqual(result.loss.shape , torch.Size([1]))
@require_torch
class __magic_name__ ( lowerCAmelCase ,lowerCAmelCase ,unittest.TestCase ):
UpperCAmelCase =(MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
UpperCAmelCase ={"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
UpperCAmelCase =False
UpperCAmelCase =False
UpperCAmelCase =False
UpperCAmelCase =False
def lowerCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] =MaskaFormerModelTester(self)
_UpperCAmelCase : Dict =ConfigTester(self , config_class=snake_case , has_text_modality=snake_case)
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : int =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(snake_case , **snake_case , output_hidden_states=snake_case)
def lowerCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*snake_case)
@unittest.skip(reason='Mask2Former does not use inputs_embeds')
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method')
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former is not a generative model')
def lowerCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not use token embeddings')
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`')
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
pass
def lowerCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : List[Any] =model_class(snake_case)
_UpperCAmelCase : Optional[int] =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : str =[*signature.parameters.keys()]
_UpperCAmelCase : Tuple =['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case)
@slow
def lowerCAmelCase ( self) -> int:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_UpperCAmelCase : Dict =MaskaFormerModel.from_pretrained(snake_case)
self.assertIsNotNone(snake_case)
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Any =(self.model_tester.min_size,) * 2
_UpperCAmelCase : Optional[Any] ={
'pixel_values': torch.randn((2, 3, *size) , device=snake_case),
'mask_labels': torch.randn((2, 1_0, *size) , device=snake_case),
'class_labels': torch.zeros(2 , 1_0 , device=snake_case).long(),
}
_UpperCAmelCase : List[str] =self.model_tester.get_config()
_UpperCAmelCase : Tuple =MaskaFormerForUniversalSegmentation(snake_case).to(snake_case)
_UpperCAmelCase : Any =model(**snake_case)
self.assertTrue(outputs.loss is not None)
def lowerCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(snake_case , **snake_case , output_hidden_states=snake_case)
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Any =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Union[str, Any] =model_class(snake_case).to(snake_case)
_UpperCAmelCase : Dict =model(**snake_case , output_attentions=snake_case)
self.assertTrue(outputs.attentions is not None)
def lowerCAmelCase ( self) -> Any:
'''simple docstring'''
if not self.model_tester.is_training:
return
_UpperCAmelCase : List[str] =self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict =self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : List[str] =model_class(snake_case)
model.to(snake_case)
model.train()
_UpperCAmelCase : Tuple =model(snake_case , mask_labels=snake_case , class_labels=snake_case).loss
loss.backward()
def lowerCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Tuple =self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str =self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Tuple =True
_UpperCAmelCase : str =True
_UpperCAmelCase : Union[str, Any] =model_class(snake_case).to(snake_case)
model.train()
_UpperCAmelCase : Union[str, Any] =model(snake_case , mask_labels=snake_case , class_labels=snake_case)
_UpperCAmelCase : Any =outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_UpperCAmelCase : Any =outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_UpperCAmelCase : int =outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_UpperCAmelCase : List[str] =outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=snake_case)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(pixel_decoder_hidden_states.grad)
self.assertIsNotNone(transformer_decoder_hidden_states.grad)
self.assertIsNotNone(attentions.grad)
lowercase =1e-4
def lowerCamelCase__ ( ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class __magic_name__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase ( self) -> Dict:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowerCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Tuple =MaskaFormerModel.from_pretrained(self.model_checkpoints).to(snake_case)
_UpperCAmelCase : List[str] =self.default_image_processor
_UpperCAmelCase : int =prepare_img()
_UpperCAmelCase : Any =image_processor(snake_case , return_tensors='pt').to(snake_case)
_UpperCAmelCase : Dict =inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0)
# check size
self.assertEqual(snake_case , (1, 3, 3_8_4, 3_8_4))
with torch.no_grad():
_UpperCAmelCase : int =model(**snake_case)
_UpperCAmelCase : List[Any] =torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]]).to(snake_case)
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , snake_case , atol=snake_case))
_UpperCAmelCase : Dict =torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]]).to(snake_case)
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , snake_case , atol=snake_case))
_UpperCAmelCase : Optional[int] =torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]]).to(snake_case)
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , snake_case , atol=snake_case))
def lowerCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(snake_case).eval()
_UpperCAmelCase : str =self.default_image_processor
_UpperCAmelCase : Any =prepare_img()
_UpperCAmelCase : Dict =image_processor(snake_case , return_tensors='pt').to(snake_case)
_UpperCAmelCase : List[Any] =inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0)
# check size
self.assertEqual(snake_case , (1, 3, 3_8_4, 3_8_4))
with torch.no_grad():
_UpperCAmelCase : Dict =model(**snake_case)
# masks_queries_logits
_UpperCAmelCase : Union[str, Any] =outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4))
_UpperCAmelCase : str =[
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
_UpperCAmelCase : Union[str, Any] =torch.tensor(snake_case).to(snake_case)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case , atol=snake_case))
# class_queries_logits
_UpperCAmelCase : Any =outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1))
_UpperCAmelCase : Optional[Any] =torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
]).to(snake_case)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case , atol=snake_case))
def lowerCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCAmelCase : Tuple =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(snake_case).eval()
_UpperCAmelCase : Optional[Any] =self.default_image_processor
_UpperCAmelCase : List[Any] =image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3)), np.zeros((3, 8_0_0, 1_3_3_3))] , segmentation_maps=[np.zeros((3_8_4, 3_8_4)).astype(np.floataa), np.zeros((3_8_4, 3_8_4)).astype(np.floataa)] , return_tensors='pt' , )
_UpperCAmelCase : int =inputs['pixel_values'].to(snake_case)
_UpperCAmelCase : Union[str, Any] =[el.to(snake_case) for el in inputs['mask_labels']]
_UpperCAmelCase : Tuple =[el.to(snake_case) for el in inputs['class_labels']]
with torch.no_grad():
_UpperCAmelCase : List[str] =model(**snake_case)
self.assertTrue(outputs.loss is not None)
| 446
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
def __init__( self : Tuple , a : Optional[Any] , a : Tuple=7 , a : int=3 , a : List[str]=18 , a : Union[str, Any]=30 , a : List[str]=400 , a : Optional[int]=True , a : Optional[int]=None , a : Dict=True , a : Tuple=None , a : List[Any]=True , a : int=[0.5, 0.5, 0.5] , a : List[Any]=[0.5, 0.5, 0.5] , a : Tuple=False , ) -> int:
"""simple docstring"""
lowercase = size if size is not None else {'''height''': 20, '''width''': 20}
lowercase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
lowercase = parent
lowercase = batch_size
lowercase = num_channels
lowercase = image_size
lowercase = min_resolution
lowercase = max_resolution
lowercase = do_resize
lowercase = size
lowercase = do_center_crop
lowercase = crop_size
lowercase = do_normalize
lowercase = image_mean
lowercase = image_std
lowercase = do_reduce_labels
def _lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def A_ ( ):
lowercase = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
lowercase = Image.open(dataset[0]['''file'''] )
lowercase = Image.open(dataset[1]['''file'''] )
return image, map
def A_ ( ):
lowercase = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
lowercase = Image.open(ds[0]['''file'''] )
lowercase = Image.open(ds[1]['''file'''] )
lowercase = Image.open(ds[2]['''file'''] )
lowercase = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class _lowerCAmelCase ( __snake_case , unittest.TestCase ):
__lowerCAmelCase : Tuple = BeitImageProcessor if is_vision_available() else None
def _lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
lowercase = BeitImageProcessingTester(self )
@property
def _lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , '''do_resize''' ) )
self.assertTrue(hasattr(a , '''size''' ) )
self.assertTrue(hasattr(a , '''do_center_crop''' ) )
self.assertTrue(hasattr(a , '''center_crop''' ) )
self.assertTrue(hasattr(a , '''do_normalize''' ) )
self.assertTrue(hasattr(a , '''image_mean''' ) )
self.assertTrue(hasattr(a , '''image_std''' ) )
def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , a )
lowercase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=a )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , a )
def _lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
pass
def _lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowercase = image_processing(a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def _lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowercase = image_processing(a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def _lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowercase = image_processing(a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def _lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
lowercase = []
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
lowercase = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched
lowercase = image_processing(a , a , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test not batched input (PIL images)
lowercase , lowercase = prepare_semantic_single_inputs()
lowercase = image_processing(a , a , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched input (PIL images)
lowercase , lowercase = prepare_semantic_batch_inputs()
lowercase = image_processing(a , a , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
def _lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
lowercase = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
lowercase , lowercase = prepare_semantic_single_inputs()
lowercase = image_processing(a , a , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 150 )
lowercase = True
lowercase = image_processing(a , a , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
| 717
|
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__lowerCAmelCase = '''http://www.mocksite.com/file1.txt'''
__lowerCAmelCase = '''"text": ["foo", "foo"]'''
__lowerCAmelCase = '''6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'''
class _lowerCAmelCase :
__lowerCAmelCase : List[Any] = 2_00
__lowerCAmelCase : Dict = {'''Content-Length''': '''100'''}
__lowerCAmelCase : Dict = {}
def _lowerCAmelCase ( self : Any , **a : Dict ) -> Dict:
"""simple docstring"""
return [bytes(a , '''utf-8''' )]
def A_ ( *__UpperCamelCase : int , **__UpperCamelCase : Any ):
return MockResponse()
@pytest.mark.parametrize('''urls_type''' , [str, list, dict] )
def A_ ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : Tuple ):
import requests
monkeypatch.setattr(__UpperCamelCase , '''request''' , __UpperCamelCase )
lowercase = URL
if issubclass(__UpperCamelCase , __UpperCamelCase ):
lowercase = url
elif issubclass(__UpperCamelCase , __UpperCamelCase ):
lowercase = [url]
elif issubclass(__UpperCamelCase , __UpperCamelCase ):
lowercase = {'''train''': url}
lowercase = '''dummy'''
lowercase = '''downloads'''
lowercase = tmp_path
lowercase = DownloadConfig(
cache_dir=os.path.join(__UpperCamelCase , __UpperCamelCase ) , use_etag=__UpperCamelCase , )
lowercase = DownloadManager(dataset_name=__UpperCamelCase , download_config=__UpperCamelCase )
lowercase = dl_manager.download(__UpperCamelCase )
lowercase = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
lowercase = [downloaded_paths]
lowercase = [urls]
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
assert "train" in downloaded_paths.keys()
lowercase = downloaded_paths.values()
lowercase = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(__UpperCamelCase , __UpperCamelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
lowercase = Path(__UpperCamelCase )
lowercase = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
lowercase = downloaded_path.read_text()
assert content == CONTENT
lowercase = downloaded_path.with_suffix('''.json''' )
assert metadata_downloaded_path.exists()
lowercase = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('''paths_type''' , [str, list, dict] )
def A_ ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str ):
lowercase = str(__UpperCamelCase )
if issubclass(__UpperCamelCase , __UpperCamelCase ):
lowercase = filename
elif issubclass(__UpperCamelCase , __UpperCamelCase ):
lowercase = [filename]
elif issubclass(__UpperCamelCase , __UpperCamelCase ):
lowercase = {'''train''': filename}
lowercase = '''dummy'''
lowercase = xz_file.parent
lowercase = '''extracted'''
lowercase = DownloadConfig(
cache_dir=__UpperCamelCase , use_etag=__UpperCamelCase , )
lowercase = DownloadManager(dataset_name=__UpperCamelCase , download_config=__UpperCamelCase )
lowercase = dl_manager.extract(__UpperCamelCase )
lowercase = paths
for extracted_paths in [extracted_paths]:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
lowercase = [extracted_paths]
lowercase = [paths]
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
assert "train" in extracted_paths.keys()
lowercase = extracted_paths.values()
lowercase = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(__UpperCamelCase , __UpperCamelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
lowercase = Path(__UpperCamelCase )
lowercase = extracted_path.parts
assert parts[-1] == hash_url_to_filename(__UpperCamelCase , etag=__UpperCamelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
lowercase = extracted_path.read_text()
lowercase = text_file.read_text()
assert extracted_file_content == expected_file_content
def A_ ( __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] ):
assert path.endswith('''.jsonl''' )
for num_items, line in enumerate(__UpperCamelCase , start=1 ):
lowercase = json.loads(line.decode('''utf-8''' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('''archive_jsonl''' , ['''tar_jsonl_path''', '''zip_jsonl_path'''] )
def A_ ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] ):
lowercase = request.getfixturevalue(__UpperCamelCase )
lowercase = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(__UpperCamelCase ) , start=1 ):
_test_jsonl(__UpperCamelCase , __UpperCamelCase )
assert num_jsonl == 2
@pytest.mark.parametrize('''archive_nested_jsonl''' , ['''tar_nested_jsonl_path''', '''zip_nested_jsonl_path'''] )
def A_ ( __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any] ):
lowercase = request.getfixturevalue(__UpperCamelCase )
lowercase = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(__UpperCamelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(__UpperCamelCase ) , start=1 ):
_test_jsonl(__UpperCamelCase , __UpperCamelCase )
assert num_tar == 1
assert num_jsonl == 2
def A_ ( __UpperCamelCase : Union[str, Any] ):
lowercase = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(__UpperCamelCase ) , start=1 ):
assert os.path.basename(__UpperCamelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 396
| 0
|
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class _lowerCamelCase ( snake_case_ ):
'''simple docstring'''
__lowercase : Optional[Any] = ComputeEnvironment.AMAZON_SAGEMAKER
__lowercase : List[Any] = True
__lowercase : List[str] = '''ml.p3.2xlarge'''
__lowercase : Tuple = '''accelerate_sagemaker_execution_role'''
__lowercase : Any = '''hf-sm'''
__lowercase : Union[str, Any] = '''us-east-1'''
__lowercase : Optional[int] = 1
__lowercase : int = '''accelerate-sagemaker-1'''
__lowercase : Optional[Any] = '''1.6'''
__lowercase : int = '''4.4'''
__lowercase : Optional[int] = '''train.py'''
__lowercase : Any = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''False''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
__lowercase : Tuple = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''--do_test''',
'''False''',
'''--do_predict''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
"""simple docstring"""
__A : Union[str, Any] = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args['model_name_or_path'] , __lowercase )
assert isinstance(converted_args['do_train'] , __lowercase )
assert isinstance(converted_args['epochs'] , __lowercase )
assert isinstance(converted_args['learning_rate'] , __lowercase )
assert isinstance(converted_args['max_steps'] , __lowercase )
with pytest.raises(__lowercase ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
| 365
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def _lowercase ( ):
__A : Dict = ArgumentParser('Accelerate CLI tool', usage='accelerate <command> [<args>]', allow_abbrev=UpperCamelCase__ )
__A : Any = parser.add_subparsers(help='accelerate command helpers' )
# Register commands
get_config_parser(subparsers=UpperCamelCase__ )
env_command_parser(subparsers=UpperCamelCase__ )
launch_command_parser(subparsers=UpperCamelCase__ )
tpu_command_parser(subparsers=UpperCamelCase__ )
test_command_parser(subparsers=UpperCamelCase__ )
# Let's go
__A : Optional[Any] = parser.parse_args()
if not hasattr(UpperCamelCase__, 'func' ):
parser.print_help()
exit(1 )
# Run
args.func(UpperCamelCase__ )
if __name__ == "__main__":
main()
| 365
| 1
|
_SCREAMING_SNAKE_CASE = 9.8_0665
def __a(SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float = g ):
'''simple docstring'''
if fluid_density <= 0:
raise ValueError("Impossible fluid density" )
if volume < 0:
raise ValueError("Impossible Object volume" )
if gravity <= 0:
raise ValueError("Impossible Gravity" )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 714
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_SCREAMING_SNAKE_CASE = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class lowerCAmelCase_ ( unittest.TestCase ):
__lowerCamelCase : Optional[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__lowerCamelCase : str = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
__lowerCamelCase : Optional[int] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
__lowerCamelCase : List[Any] = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def _snake_case ( self ) -> str:
_lowerCAmelCase = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" )
_lowerCAmelCase = text_classifier("This is great !" )
self.assertEqual(nested_simplify(_lowerCAmelCase ) , [{"label": "LABEL_0", "score": 0.504}] )
_lowerCAmelCase = text_classifier("This is great !" , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}] )
_lowerCAmelCase = text_classifier(["This is great !", "This is bad"] , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
] , )
_lowerCAmelCase = text_classifier("This is great !" , top_k=1 )
self.assertEqual(nested_simplify(_lowerCAmelCase ) , [{"label": "LABEL_0", "score": 0.504}] )
# Legacy behavior
_lowerCAmelCase = text_classifier("This is great !" , return_all_scores=_lowerCAmelCase )
self.assertEqual(nested_simplify(_lowerCAmelCase ) , [{"label": "LABEL_0", "score": 0.504}] )
_lowerCAmelCase = text_classifier("This is great !" , return_all_scores=_lowerCAmelCase )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]] )
_lowerCAmelCase = text_classifier(["This is great !", "Something else"] , return_all_scores=_lowerCAmelCase )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
] , )
_lowerCAmelCase = text_classifier(["This is great !", "Something else"] , return_all_scores=_lowerCAmelCase )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
{"label": "LABEL_0", "score": 0.504},
{"label": "LABEL_0", "score": 0.504},
] , )
@require_torch
def _snake_case ( self ) -> Tuple:
import torch
_lowerCAmelCase = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" , device=torch.device("cpu" ) , )
_lowerCAmelCase = text_classifier("This is great !" )
self.assertEqual(nested_simplify(_lowerCAmelCase ) , [{"label": "LABEL_0", "score": 0.504}] )
@require_tf
def _snake_case ( self ) -> Any:
_lowerCAmelCase = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="tf" )
_lowerCAmelCase = text_classifier("This is great !" )
self.assertEqual(nested_simplify(_lowerCAmelCase ) , [{"label": "LABEL_0", "score": 0.504}] )
@slow
@require_torch
def _snake_case ( self ) -> Dict:
_lowerCAmelCase = pipeline("text-classification" )
_lowerCAmelCase = text_classifier("This is great !" )
self.assertEqual(nested_simplify(_lowerCAmelCase ) , [{"label": "POSITIVE", "score": 1.0}] )
_lowerCAmelCase = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(_lowerCAmelCase ) , [{"label": "NEGATIVE", "score": 1.0}] )
_lowerCAmelCase = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(_lowerCAmelCase ) , [{"label": "POSITIVE", "score": 0.988}] )
@slow
@require_tf
def _snake_case ( self ) -> Any:
_lowerCAmelCase = pipeline("text-classification" , framework="tf" )
_lowerCAmelCase = text_classifier("This is great !" )
self.assertEqual(nested_simplify(_lowerCAmelCase ) , [{"label": "POSITIVE", "score": 1.0}] )
_lowerCAmelCase = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(_lowerCAmelCase ) , [{"label": "NEGATIVE", "score": 1.0}] )
_lowerCAmelCase = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(_lowerCAmelCase ) , [{"label": "POSITIVE", "score": 0.988}] )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> str:
_lowerCAmelCase = TextClassificationPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
return text_classifier, ["HuggingFace is in", "This is another test"]
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
_lowerCAmelCase = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
_lowerCAmelCase = "HuggingFace is in"
_lowerCAmelCase = text_classifier(_lowerCAmelCase )
self.assertEqual(nested_simplify(_lowerCAmelCase ) , [{"label": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase )}] )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
_lowerCAmelCase = ["HuggingFace is in ", "Paris is in France"]
_lowerCAmelCase = text_classifier(_lowerCAmelCase )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [{"label": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase )}, {"label": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase )}] , )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["label"] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
_lowerCAmelCase = text_classifier(_lowerCAmelCase , top_k=_lowerCAmelCase )
_lowerCAmelCase = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [[{"label": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase )}] * N, [{"label": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase )}] * N] , )
_lowerCAmelCase = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
_lowerCAmelCase = text_classifier(_lowerCAmelCase )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , {"label": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase )} , )
self.assertTrue(outputs["label"] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
_lowerCAmelCase = [["HuggingFace is in ", "Paris is in France"]]
with self.assertRaises(_lowerCAmelCase ):
text_classifier(_lowerCAmelCase )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
_lowerCAmelCase = text_classifier([[["HuggingFace is in ", "Paris is in France"]]] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [{"label": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase )}] , )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
| 489
| 0
|
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float:
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(1_25.50, 0.05) = }''')
| 66
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["PoolFormerFeatureExtractor"]
UpperCamelCase = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 66
| 1
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase( self ):
lowercase = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
lowercase = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
sd_pipe.set_scheduler("sample_euler" )
lowercase = "A painting of a squirrel eating a burger"
lowercase = torch.manual_seed(0 )
lowercase = sd_pipe([prompt] , generator=lowerCamelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase( self ):
lowercase = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
lowercase = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
sd_pipe.set_scheduler("sample_euler" )
lowercase = "A painting of a squirrel eating a burger"
lowercase = torch.manual_seed(0 )
lowercase = sd_pipe([prompt] , generator=lowerCamelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def UpperCamelCase( self ):
lowercase = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
lowercase = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
sd_pipe.set_scheduler("sample_dpmpp_2m" )
lowercase = "A painting of a squirrel eating a burger"
lowercase = torch.manual_seed(0 )
lowercase = sd_pipe(
[prompt] , generator=lowerCamelCase , guidance_scale=7.5 , num_inference_steps=15 , output_type="np" , use_karras_sigmas=lowerCamelCase , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase = np.array(
[0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 706
|
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
__magic_name__ : Union[str, Any] = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
__magic_name__ : int = """https://storage.googleapis.com/cvdf-datasets/mnist/"""
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = numpy.dtype(numpy.uintaa ).newbyteorder(">" )
return numpy.frombuffer(bytestream.read(4 ) , dtype=SCREAMING_SNAKE_CASE__ )[0]
@deprecated(SCREAMING_SNAKE_CASE__ , "Please use tf.data to implement this functionality." )
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=SCREAMING_SNAKE_CASE__ ) as bytestream:
_snake_case = _readaa(SCREAMING_SNAKE_CASE__ )
if magic != 20_51:
raise ValueError(
"Invalid magic number %d in MNIST image file: %s" % (magic, f.name) )
_snake_case = _readaa(SCREAMING_SNAKE_CASE__ )
_snake_case = _readaa(SCREAMING_SNAKE_CASE__ )
_snake_case = _readaa(SCREAMING_SNAKE_CASE__ )
_snake_case = bytestream.read(rows * cols * num_images )
_snake_case = numpy.frombuffer(SCREAMING_SNAKE_CASE__ , dtype=numpy.uinta )
_snake_case = data.reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
return data
@deprecated(SCREAMING_SNAKE_CASE__ , "Please use tf.one_hot on tensors." )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = labels_dense.shape[0]
_snake_case = numpy.arange(SCREAMING_SNAKE_CASE__ ) * num_classes
_snake_case = numpy.zeros((num_labels, num_classes) )
_snake_case = 1
return labels_one_hot
@deprecated(SCREAMING_SNAKE_CASE__ , "Please use tf.data to implement this functionality." )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=10 ):
'''simple docstring'''
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=SCREAMING_SNAKE_CASE__ ) as bytestream:
_snake_case = _readaa(SCREAMING_SNAKE_CASE__ )
if magic != 20_49:
raise ValueError(
"Invalid magic number %d in MNIST label file: %s" % (magic, f.name) )
_snake_case = _readaa(SCREAMING_SNAKE_CASE__ )
_snake_case = bytestream.read(SCREAMING_SNAKE_CASE__ )
_snake_case = numpy.frombuffer(SCREAMING_SNAKE_CASE__ , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return labels
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
@deprecated(
lowerCamelCase , "Please use alternatives such as official/mnist/_DataSet.py"
" from tensorflow/models." , )
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=dtypes.floataa , lowerCamelCase=True , lowerCamelCase=None , ):
_snake_case , _snake_case = random_seed.get_seed(lowerCamelCase )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
_snake_case = dtypes.as_dtype(lowerCamelCase ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype )
if fake_data:
_snake_case = 10_000
_snake_case = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), F'''images.shape: {images.shape} labels.shape: {labels.shape}'''
_snake_case = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
_snake_case = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
_snake_case = images.astype(numpy.floataa )
_snake_case = numpy.multiply(lowerCamelCase , 1.0 / 255.0 )
_snake_case = images
_snake_case = labels
_snake_case = 0
_snake_case = 0
@property
def UpperCamelCase( self ):
return self._images
@property
def UpperCamelCase( self ):
return self._labels
@property
def UpperCamelCase( self ):
return self._num_examples
@property
def UpperCamelCase( self ):
return self._epochs_completed
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase=False , lowerCamelCase=True ):
if fake_data:
_snake_case = [1] * 784
_snake_case = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(lowerCamelCase )],
[fake_label for _ in range(lowerCamelCase )],
)
_snake_case = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
_snake_case = numpy.arange(self._num_examples )
numpy.random.shuffle(lowerCamelCase )
_snake_case = self.images[perma]
_snake_case = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
_snake_case = self._num_examples - start
_snake_case = self._images[start : self._num_examples]
_snake_case = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
_snake_case = numpy.arange(self._num_examples )
numpy.random.shuffle(lowerCamelCase )
_snake_case = self.images[perm]
_snake_case = self.labels[perm]
# Start next epoch
_snake_case = 0
_snake_case = batch_size - rest_num_examples
_snake_case = self._index_in_epoch
_snake_case = self._images[start:end]
_snake_case = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
_snake_case = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(SCREAMING_SNAKE_CASE__ , "Please write your own downloading logic." )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if not gfile.Exists(SCREAMING_SNAKE_CASE__ ):
gfile.MakeDirs(SCREAMING_SNAKE_CASE__ )
_snake_case = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not gfile.Exists(SCREAMING_SNAKE_CASE__ ):
urllib.request.urlretrieve(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # noqa: S310
with gfile.GFile(SCREAMING_SNAKE_CASE__ ) as f:
_snake_case = f.size()
print("Successfully downloaded" , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , "bytes." )
return filepath
@deprecated(
SCREAMING_SNAKE_CASE__ , "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=dtypes.floataa , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=50_00 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=DEFAULT_SOURCE_URL , ):
'''simple docstring'''
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=SCREAMING_SNAKE_CASE__ , one_hot=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ , seed=SCREAMING_SNAKE_CASE__ )
_snake_case = fake()
_snake_case = fake()
_snake_case = fake()
return _Datasets(train=SCREAMING_SNAKE_CASE__ , validation=SCREAMING_SNAKE_CASE__ , test=SCREAMING_SNAKE_CASE__ )
if not source_url: # empty string check
_snake_case = DEFAULT_SOURCE_URL
_snake_case = "train-images-idx3-ubyte.gz"
_snake_case = "train-labels-idx1-ubyte.gz"
_snake_case = "t10k-images-idx3-ubyte.gz"
_snake_case = "t10k-labels-idx1-ubyte.gz"
_snake_case = _maybe_download(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + train_images_file )
with gfile.Open(SCREAMING_SNAKE_CASE__ , "rb" ) as f:
_snake_case = _extract_images(SCREAMING_SNAKE_CASE__ )
_snake_case = _maybe_download(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + train_labels_file )
with gfile.Open(SCREAMING_SNAKE_CASE__ , "rb" ) as f:
_snake_case = _extract_labels(SCREAMING_SNAKE_CASE__ , one_hot=SCREAMING_SNAKE_CASE__ )
_snake_case = _maybe_download(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + test_images_file )
with gfile.Open(SCREAMING_SNAKE_CASE__ , "rb" ) as f:
_snake_case = _extract_images(SCREAMING_SNAKE_CASE__ )
_snake_case = _maybe_download(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + test_labels_file )
with gfile.Open(SCREAMING_SNAKE_CASE__ , "rb" ) as f:
_snake_case = _extract_labels(SCREAMING_SNAKE_CASE__ , one_hot=SCREAMING_SNAKE_CASE__ )
if not 0 <= validation_size <= len(SCREAMING_SNAKE_CASE__ ):
_snake_case = (
"Validation size should be between 0 and "
f'''{len(SCREAMING_SNAKE_CASE__ )}. Received: {validation_size}.'''
)
raise ValueError(SCREAMING_SNAKE_CASE__ )
_snake_case = train_images[:validation_size]
_snake_case = train_labels[:validation_size]
_snake_case = train_images[validation_size:]
_snake_case = train_labels[validation_size:]
_snake_case = {"dtype": dtype, "reshape": reshape, "seed": seed}
_snake_case = _DataSet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
_snake_case = _DataSet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
_snake_case = _DataSet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
return _Datasets(train=SCREAMING_SNAKE_CASE__ , validation=SCREAMING_SNAKE_CASE__ , test=SCREAMING_SNAKE_CASE__ )
| 368
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase_ ( __A : Tuple , __A : Union[str, Any] , __A : Optional[int] ) -> str:
"""simple docstring"""
lowercase : Union[str, Any] =BertConfig.from_json_file(__A )
print(F'Building PyTorch model from configuration: {config}' )
lowercase : Optional[int] =BertForPreTraining(__A )
# Load weights from tf checkpoint
load_tf_weights_in_bert(__A , __A , __A )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , __A )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 94
|
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def _lowercase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}." )
if tokenizer_name is None:
__A : Tuple = TOKENIZER_CLASSES
else:
__A : Dict = {tokenizer_name: getattr(SCREAMING_SNAKE_CASE , tokenizer_name + "Fast" )}
logger.info(f"Loading tokenizer classes: {tokenizer_names}" )
for tokenizer_name in tokenizer_names:
__A : int = TOKENIZER_CLASSES[tokenizer_name]
__A : Union[str, Any] = True
if checkpoint_name is None:
__A : Dict = list(tokenizer_class.max_model_input_sizes.keys() )
else:
__A : str = [checkpoint_name]
logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}" )
for checkpoint in checkpoint_names:
logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}" )
# Load tokenizer
__A : List[str] = tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , force_download=SCREAMING_SNAKE_CASE )
# Save fast tokenizer
logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}" )
# For organization names we create sub-directories
if "/" in checkpoint:
__A ,__A : Optional[Any] = checkpoint.split("/" )
__A : str = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif add_prefix:
__A : Optional[int] = checkpoint
__A : Any = dump_path
else:
__A : int = None
__A : Optional[int] = dump_path
logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
__A : Any = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
__A : Optional[int] = file_path.split(SCREAMING_SNAKE_CASE )[-1][0]
if next_char == "/":
__A : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__A : Union[str, Any] = None
logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
__A : Tuple = tokenizer.save_pretrained(
SCREAMING_SNAKE_CASE , legacy_format=SCREAMING_SNAKE_CASE , filename_prefix=SCREAMING_SNAKE_CASE )
logger.info(f"=> File names {file_names}" )
for file_name in file_names:
if not file_name.endswith("tokenizer.json" ):
os.remove(SCREAMING_SNAKE_CASE )
logger.info(f"=> removing {file_name}" )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
_UpperCamelCase = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 111
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
A__: List[Any] = None
A__: List[str] = logging.get_logger(__name__)
A__: Union[str, Any] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
A__: Any = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json''',
},
}
A__: str = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
A__: Tuple = '''▁'''
# Segments (not really needed)
A__: int = 0
A__: Tuple = 1
A__: int = 2
A__: Dict = 3
A__: Union[str, Any] = 4
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : str = VOCAB_FILES_NAMES
__UpperCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Dict = "left"
__UpperCamelCase : Union[str, Any] = XLNetTokenizer
def __init__( self :Any , SCREAMING_SNAKE_CASE :List[Any]=None , SCREAMING_SNAKE_CASE :Union[str, Any]=None , SCREAMING_SNAKE_CASE :Tuple=False , SCREAMING_SNAKE_CASE :Optional[Any]=True , SCREAMING_SNAKE_CASE :Optional[int]=False , SCREAMING_SNAKE_CASE :List[Any]="<s>" , SCREAMING_SNAKE_CASE :int="</s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="<unk>" , SCREAMING_SNAKE_CASE :Optional[int]="<sep>" , SCREAMING_SNAKE_CASE :Any="<pad>" , SCREAMING_SNAKE_CASE :List[str]="<cls>" , SCREAMING_SNAKE_CASE :Optional[Any]="<mask>" , SCREAMING_SNAKE_CASE :Union[str, Any]=["<eop>", "<eod>"] , **SCREAMING_SNAKE_CASE :List[Any] , ) -> List[Any]:
'''simple docstring'''
_a : List[str] =AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
vocab_file=SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , do_lower_case=SCREAMING_SNAKE_CASE , remove_space=SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , additional_special_tokens=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
_a : Optional[int] =3
_a : Dict =do_lower_case
_a : str =remove_space
_a : List[Any] =keep_accents
_a : List[Any] =vocab_file
_a : Optional[Any] =False if not self.vocab_file else True
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_a : List[Any] =[self.sep_token_id]
_a : Dict =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_a : str =[self.sep_token_id]
_a : Any =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_a : Tuple =os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 710
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
A__: Any = logging.get_logger(__name__)
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Dict = ["pixel_values"]
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :bool = True , SCREAMING_SNAKE_CASE :Optional[Dict[str, int]] = None , SCREAMING_SNAKE_CASE :PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE :bool = True , SCREAMING_SNAKE_CASE :Dict[str, int] = None , SCREAMING_SNAKE_CASE :bool = True , SCREAMING_SNAKE_CASE :Union[int, float] = 1 / 2_5_5 , SCREAMING_SNAKE_CASE :bool = True , SCREAMING_SNAKE_CASE :Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE :Optional[Union[float, List[float]]] = None , **SCREAMING_SNAKE_CASE :Optional[int] , ) -> None:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE )
_a : Any =size if size is not None else {"""shortest_edge""": 2_5_6}
_a : int =get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
_a : Union[str, Any] =get_size_dict(SCREAMING_SNAKE_CASE )
_a : List[Any] =do_resize
_a : Optional[int] =size
_a : Union[str, Any] =resample
_a : List[Any] =do_center_crop
_a : Optional[Any] =crop_size
_a : List[Any] =do_rescale
_a : Optional[int] =rescale_factor
_a : int =do_normalize
_a : Tuple =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_a : Any =image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :np.ndarray , SCREAMING_SNAKE_CASE :Dict[str, int] , SCREAMING_SNAKE_CASE :PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE :Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE :Any , ) -> np.ndarray:
'''simple docstring'''
_a : Any =get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size:
raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
_a : Union[str, Any] =get_resize_output_image_size(SCREAMING_SNAKE_CASE , size=size["""shortest_edge"""] , default_to_square=SCREAMING_SNAKE_CASE )
return resize(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :np.ndarray , SCREAMING_SNAKE_CASE :Dict[str, int] , SCREAMING_SNAKE_CASE :Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE :Union[str, Any] , ) -> np.ndarray:
'''simple docstring'''
_a : List[str] =get_size_dict(SCREAMING_SNAKE_CASE )
return center_crop(SCREAMING_SNAKE_CASE , size=(size["""height"""], size["""width"""]) , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :np.ndarray , SCREAMING_SNAKE_CASE :float , SCREAMING_SNAKE_CASE :Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE :Optional[Any] ) -> np.ndarray:
'''simple docstring'''
return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :np.ndarray , SCREAMING_SNAKE_CASE :Union[float, List[float]] , SCREAMING_SNAKE_CASE :Union[float, List[float]] , SCREAMING_SNAKE_CASE :Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE :List[Any] , ) -> np.ndarray:
'''simple docstring'''
return normalize(SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :ImageInput , SCREAMING_SNAKE_CASE :Optional[bool] = None , SCREAMING_SNAKE_CASE :Dict[str, int] = None , SCREAMING_SNAKE_CASE :PILImageResampling = None , SCREAMING_SNAKE_CASE :bool = None , SCREAMING_SNAKE_CASE :Dict[str, int] = None , SCREAMING_SNAKE_CASE :Optional[bool] = None , SCREAMING_SNAKE_CASE :Optional[float] = None , SCREAMING_SNAKE_CASE :Optional[bool] = None , SCREAMING_SNAKE_CASE :Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE :Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE :Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE :Union[str, ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE :Optional[Any] , ) -> Optional[Any]:
'''simple docstring'''
_a : Optional[Any] =do_resize if do_resize is not None else self.do_resize
_a : int =size if size is not None else self.size
_a : str =get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
_a : int =resample if resample is not None else self.resample
_a : Optional[int] =do_center_crop if do_center_crop is not None else self.do_center_crop
_a : Union[str, Any] =crop_size if crop_size is not None else self.crop_size
_a : Any =get_size_dict(SCREAMING_SNAKE_CASE )
_a : Any =do_rescale if do_rescale is not None else self.do_rescale
_a : List[Any] =rescale_factor if rescale_factor is not None else self.rescale_factor
_a : str =do_normalize if do_normalize is not None else self.do_normalize
_a : Union[str, Any] =image_mean if image_mean is not None else self.image_mean
_a : Optional[Any] =image_std if image_std is not None else self.image_std
_a : List[Any] =make_list_of_images(SCREAMING_SNAKE_CASE )
if not valid_images(SCREAMING_SNAKE_CASE ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_a : int =[to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
_a : Tuple =[self.resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
_a : Union[str, Any] =[self.center_crop(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
_a : int =[self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
_a : Any =[self.normalize(image=SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE ) for image in images]
_a : Tuple =[to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images]
_a : Any ={"""pixel_values""": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
| 506
| 0
|
from itertools import permutations
def lowercase__ ( A_: tuple ) -> bool:
"""simple docstring"""
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
__UpperCAmelCase =[7, 11, 13, 17]
for i, test in enumerate(A_ ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def lowercase__ ( A_: int = 10 ) -> int:
"""simple docstring"""
return sum(
int("""""".join(map(A_ , A_ ) ) )
for num in permutations(range(A_ ) )
if is_substring_divisible(A_ ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 68
|
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = int(number**0.5 )
return number == sq * sq
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
UpperCAmelCase_ = x_den * y_den * z_den
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
top //= hcf
bottom //= hcf
return top, bottom
def a__ ( lowerCAmelCase__ = 35 ):
UpperCAmelCase_ = set()
UpperCAmelCase_ = 42
UpperCAmelCase_ = Fraction(0 )
UpperCAmelCase_ = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
UpperCAmelCase_ = x_num * y_den + x_den * y_num
UpperCAmelCase_ = x_den * y_den
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
# n=2
UpperCAmelCase_ = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
UpperCAmelCase_ = x_den * x_den * y_den * y_den
if is_sq(lowerCAmelCase__ ) and is_sq(lowerCAmelCase__ ):
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
# n=-1
UpperCAmelCase_ = x_num * y_num
UpperCAmelCase_ = x_den * y_num + x_num * y_den
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
# n=2
UpperCAmelCase_ = x_num * x_num * y_num * y_num
UpperCAmelCase_ = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(lowerCAmelCase__ ) and is_sq(lowerCAmelCase__ ):
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
for num, den in unique_s:
total += Fraction(lowerCAmelCase__ , lowerCAmelCase__ )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"{solution() = }")
| 82
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
A = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 704
|
A = 'Alexander Joslin'
import operator as op
from .stack import Stack
def lowerCamelCase ( UpperCamelCase : str ) -> int:
_lowerCamelCase = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
_lowerCamelCase = Stack()
_lowerCamelCase = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(UpperCamelCase ) )
elif i in operators:
# RULE 2
operator_stack.push(UpperCamelCase )
elif i == ")":
# RULE 4
_lowerCamelCase = operator_stack.peek()
operator_stack.pop()
_lowerCamelCase = operand_stack.peek()
operand_stack.pop()
_lowerCamelCase = operand_stack.peek()
operand_stack.pop()
_lowerCamelCase = operators[opr](UpperCamelCase , UpperCamelCase )
operand_stack.push(UpperCamelCase )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
A = '(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
| 234
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _UpperCAmelCase( __snake_case , unittest.TestCase ):
lowercase__ = DebertaTokenizer
lowercase__ = True
lowercase__ = DebertaTokenizerFast
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCamelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
_UpperCamelCase = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE))))
_UpperCamelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_UpperCamelCase = {'''unk_token''': '''[UNK]'''}
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE) + '''\n''')
with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE))
def UpperCAmelCase ( self , **__a) -> List[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE)
def UpperCAmelCase ( self , __a) -> Dict:
'''simple docstring'''
_UpperCamelCase = '''lower newer'''
_UpperCamelCase = '''lower newer'''
return input_text, output_text
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = '''lower newer'''
_UpperCamelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
_UpperCamelCase = tokenizer.tokenize(__SCREAMING_SNAKE_CASE)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
_UpperCamelCase = tokens + [tokenizer.unk_token]
_UpperCamelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = tokenizer('''Hello''' , '''World''')
_UpperCamelCase = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , __SCREAMING_SNAKE_CASE)
@slow
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''')
_UpperCamelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE)
_UpperCamelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE)
_UpperCamelCase = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE)
_UpperCamelCase = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE)
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE)
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class)
for tokenizer_class in tokenizer_classes:
_UpperCamelCase = tokenizer_class.from_pretrained('''microsoft/deberta-base''')
_UpperCamelCase = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
_UpperCamelCase = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE)
_UpperCamelCase = [tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE) for seq in encoding['''input_ids''']]
# fmt: off
_UpperCamelCase = {
'''input_ids''': [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
_UpperCamelCase = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , __SCREAMING_SNAKE_CASE)
for expected, decoded in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
| 19
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : List[Any] = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ['''PLBartTokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[str] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 549
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
_UpperCAmelCase : Any = logging.get_logger(__name__)
class lowercase ( lowercase_ ):
def __init__( self , *snake_case , **snake_case ):
warnings.warn(
'The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use DeformableDetrImageProcessor instead.' , snake_case , )
super().__init__(*snake_case , **snake_case )
| 711
|
_UpperCAmelCase : str = [0, 2, 4, 6, 8]
_UpperCAmelCase : Any = [1, 3, 5, 7, 9]
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
snake_case_ = 0
for digit in range(10 ):
snake_case_ = digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 10 , UpperCamelCase__ , UpperCamelCase__ )
return result
snake_case_ = 0
for digita in range(10 ):
snake_case_ = digita
if (remainder + digita) % 2 == 0:
snake_case_ = ODD_DIGITS
else:
snake_case_ = EVEN_DIGITS
for digita in other_parity_digits:
snake_case_ = digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 10 , UpperCamelCase__ , UpperCamelCase__ , )
return result
def __lowerCamelCase ( UpperCamelCase__ = 9 ):
'''simple docstring'''
snake_case_ = 0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(UpperCamelCase__ , 0 , [0] * length , UpperCamelCase__ )
return result
if __name__ == "__main__":
print(F'''{solution() = }''')
| 108
| 0
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCamelCase : Any = 16
UpperCamelCase : Any = 32
def A__ ( __lowerCAmelCase : Accelerator , __lowerCAmelCase : int = 16 ):
lowerCamelCase__ = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowerCamelCase__ = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(__lowerCAmelCase : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase__ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCamelCase__ = datasets.map(
__lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase__ = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__lowerCAmelCase : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCamelCase__ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCamelCase__ = 16
elif accelerator.mixed_precision != "no":
lowerCamelCase__ = 8
else:
lowerCamelCase__ = None
return tokenizer.pad(
__lowerCAmelCase , padding="""longest""" , max_length=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowerCamelCase__ = DataLoader(
tokenized_datasets["""train"""] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
lowerCamelCase__ = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCamelCase : int = mocked_dataloaders # noqa: F811
def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ):
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , __lowerCAmelCase ) == "1":
lowerCamelCase__ = 2
# Initialize accelerator
lowerCamelCase__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase__ = config["""lr"""]
lowerCamelCase__ = int(config["""num_epochs"""] )
lowerCamelCase__ = int(config["""seed"""] )
lowerCamelCase__ = int(config["""batch_size"""] )
lowerCamelCase__ = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
lowerCamelCase__ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowerCamelCase__ = batch_size // MAX_GPU_BATCH_SIZE
lowerCamelCase__ = MAX_GPU_BATCH_SIZE
set_seed(__lowerCAmelCase )
lowerCamelCase__ , lowerCamelCase__ = get_dataloaders(__lowerCAmelCase , __lowerCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase__ = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__lowerCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase__ = model.to(accelerator.device )
# Instantiate optimizer
lowerCamelCase__ = AdamW(params=model.parameters() , lr=__lowerCAmelCase )
# Instantiate scheduler
lowerCamelCase__ = get_linear_schedule_with_warmup(
optimizer=__lowerCAmelCase , num_warmup_steps=100 , num_training_steps=(len(__lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Now we train the model
for epoch in range(__lowerCAmelCase ):
model.train()
for step, batch in enumerate(__lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCamelCase__ = model(**__lowerCAmelCase )
lowerCamelCase__ = outputs.loss
lowerCamelCase__ = loss / gradient_accumulation_steps
accelerator.backward(__lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
lowerCamelCase__ = 0
for step, batch in enumerate(__lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase__ = model(**__lowerCAmelCase )
lowerCamelCase__ = outputs.logits.argmax(dim=-1 )
lowerCamelCase__ , lowerCamelCase__ = accelerator.gather((predictions, batch["""labels"""]) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(__lowerCAmelCase ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
lowerCamelCase__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowerCamelCase__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=__lowerCAmelCase , references=__lowerCAmelCase , )
lowerCamelCase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , __lowerCAmelCase )
def A__ ( ):
lowerCamelCase__ = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=__lowerCAmelCase , default=__lowerCAmelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
lowerCamelCase__ = parser.parse_args()
lowerCamelCase__ = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
main()
| 50
|
'''simple docstring'''
from PIL import Image
def A__ ( __lowerCAmelCase : Image , __lowerCAmelCase : float ):
def brightness(__lowerCAmelCase : int ) -> float:
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
return img.point(__lowerCAmelCase )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
UpperCamelCase : Union[str, Any] = change_brightness(img, 1_00)
brigt_img.save('image_data/lena_brightness.png', format='png')
| 50
| 1
|
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A_ = logging.get_logger(__name__)
A_ = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class SCREAMING_SNAKE_CASE_ ( UpperCamelCase_ ):
"""simple docstring"""
A__ = "detr"
A__ = ["past_key_values"]
A__ = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=3 , _lowerCAmelCase=100 , _lowerCAmelCase=6 , _lowerCAmelCase=2048 , _lowerCAmelCase=8 , _lowerCAmelCase=6 , _lowerCAmelCase=2048 , _lowerCAmelCase=8 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=True , _lowerCAmelCase="relu" , _lowerCAmelCase=256 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1.0 , _lowerCAmelCase=False , _lowerCAmelCase="sine" , _lowerCAmelCase="resnet50" , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=1 , _lowerCAmelCase=5 , _lowerCAmelCase=2 , _lowerCAmelCase=1 , _lowerCAmelCase=1 , _lowerCAmelCase=5 , _lowerCAmelCase=2 , _lowerCAmelCase=0.1 , **_lowerCAmelCase , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can\'t specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
lowerCamelCase__ = CONFIG_MAPPING['''resnet'''](out_features=["stage4"] )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase__ = backbone_config.get("model_type" )
lowerCamelCase__ = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase__ = config_class.from_dict(UpperCamelCase__ )
# set timm attributes to None
lowerCamelCase__ = None, None, None
lowerCamelCase__ = use_timm_backbone
lowerCamelCase__ = backbone_config
lowerCamelCase__ = num_channels
lowerCamelCase__ = num_queries
lowerCamelCase__ = d_model
lowerCamelCase__ = encoder_ffn_dim
lowerCamelCase__ = encoder_layers
lowerCamelCase__ = encoder_attention_heads
lowerCamelCase__ = decoder_ffn_dim
lowerCamelCase__ = decoder_layers
lowerCamelCase__ = decoder_attention_heads
lowerCamelCase__ = dropout
lowerCamelCase__ = attention_dropout
lowerCamelCase__ = activation_dropout
lowerCamelCase__ = activation_function
lowerCamelCase__ = init_std
lowerCamelCase__ = init_xavier_std
lowerCamelCase__ = encoder_layerdrop
lowerCamelCase__ = decoder_layerdrop
lowerCamelCase__ = encoder_layers
lowerCamelCase__ = auxiliary_loss
lowerCamelCase__ = position_embedding_type
lowerCamelCase__ = backbone
lowerCamelCase__ = use_pretrained_backbone
lowerCamelCase__ = dilation
# Hungarian matcher
lowerCamelCase__ = class_cost
lowerCamelCase__ = bbox_cost
lowerCamelCase__ = giou_cost
# Loss coefficients
lowerCamelCase__ = mask_loss_coefficient
lowerCamelCase__ = dice_loss_coefficient
lowerCamelCase__ = bbox_loss_coefficient
lowerCamelCase__ = giou_loss_coefficient
lowerCamelCase__ = eos_coefficient
super().__init__(is_encoder_decoder=UpperCamelCase__ , **UpperCamelCase__ )
@property
def __magic_name__ ( self ):
return self.encoder_attention_heads
@property
def __magic_name__ ( self ):
return self.d_model
@classmethod
def __magic_name__ ( cls , _lowerCAmelCase , **_lowerCAmelCase ):
return cls(backbone_config=UpperCamelCase__ , **UpperCamelCase__ )
def __magic_name__ ( self ):
lowerCamelCase__ = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCamelCase__ = self.backbone_config.to_dict()
lowerCamelCase__ = self.__class__.model_type
return output
class SCREAMING_SNAKE_CASE_ ( UpperCamelCase_ ):
"""simple docstring"""
A__ = version.parse("1.11" )
@property
def __magic_name__ ( self ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def __magic_name__ ( self ):
return 1E-5
@property
def __magic_name__ ( self ):
return 12
| 712
|
from __future__ import annotations
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ , lowerCamelCase__ = text, pattern
lowerCamelCase__ , lowerCamelCase__ = len(_lowerCAmelCase ), len(_lowerCAmelCase )
def __magic_name__ ( self , _lowerCAmelCase ):
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def __magic_name__ ( self , _lowerCAmelCase ):
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def __magic_name__ ( self ):
# searches pattern in text and returns index positions
lowerCamelCase__ = []
for i in range(self.textLen - self.patLen + 1 ):
lowerCamelCase__ = self.mismatch_in_text(_lowerCAmelCase )
if mismatch_index == -1:
positions.append(_lowerCAmelCase )
else:
lowerCamelCase__ = self.match_in_pattern(self.text[mismatch_index] )
lowerCamelCase__ = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
A_ = "ABAABA"
A_ = "AB"
A_ = BoyerMooreSearch(text, pattern)
A_ = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
| 360
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}
class _a ( UpperCamelCase__ ):
_lowercase : Any = '''mra'''
def __init__( self: Tuple , UpperCamelCase_: Optional[Any]=50_265 , UpperCamelCase_: Union[str, Any]=768 , UpperCamelCase_: Optional[Any]=12 , UpperCamelCase_: Tuple=12 , UpperCamelCase_: Dict=3_072 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Any=0.1 , UpperCamelCase_: List[str]=0.1 , UpperCamelCase_: List[Any]=512 , UpperCamelCase_: Union[str, Any]=1 , UpperCamelCase_: Dict=0.02 , UpperCamelCase_: Any=1E-5 , UpperCamelCase_: List[str]="absolute" , UpperCamelCase_: Dict=4 , UpperCamelCase_: Union[str, Any]="full" , UpperCamelCase_: List[str]=0 , UpperCamelCase_: Union[str, Any]=0 , UpperCamelCase_: str=1 , UpperCamelCase_: int=0 , UpperCamelCase_: Tuple=2 , **UpperCamelCase_: Optional[int] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = vocab_size
lowercase__ = max_position_embeddings
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = type_vocab_size
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = block_per_row
lowercase__ = approx_mode
lowercase__ = initial_prior_first_n_blocks
lowercase__ = initial_prior_diagonal_n_blocks
| 43
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class _a ( UpperCamelCase__ ):
_lowercase : Union[PIL.Image.Image, np.ndarray]
class _a ( UpperCamelCase__ ):
def __init__( self: Dict , UpperCamelCase_: PriorTransformer , UpperCamelCase_: CLIPVisionModel , UpperCamelCase_: CLIPImageProcessor , UpperCamelCase_: HeunDiscreteScheduler , UpperCamelCase_: ShapERenderer , ) -> List[str]:
"""simple docstring"""
super().__init__()
self.register_modules(
prior=UpperCamelCase_ , image_encoder=UpperCamelCase_ , image_processor=UpperCamelCase_ , scheduler=UpperCamelCase_ , renderer=UpperCamelCase_ , )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Tuple ) -> List[Any]:
"""simple docstring"""
if latents is None:
lowercase__ = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowercase__ = latents.to(UpperCamelCase_ )
lowercase__ = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self: str , UpperCamelCase_: Tuple=0 ) -> int:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
lowercase__ = torch.device(f'cuda:{gpu_id}' )
lowercase__ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
@property
def lowerCamelCase_ ( self: List[Any] ) -> Dict:
"""simple docstring"""
if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(UpperCamelCase_ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Tuple , UpperCamelCase_: str , ) -> Any:
"""simple docstring"""
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , torch.Tensor ):
lowercase__ = torch.cat(UpperCamelCase_ , axis=0 ) if image[0].ndim == 4 else torch.stack(UpperCamelCase_ , axis=0 )
if not isinstance(UpperCamelCase_ , torch.Tensor ):
lowercase__ = self.image_processor(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 )
lowercase__ = image.to(dtype=self.image_encoder.dtype , device=UpperCamelCase_ )
lowercase__ = self.image_encoder(UpperCamelCase_ )['''last_hidden_state''']
lowercase__ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowercase__ = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
if do_classifier_free_guidance:
lowercase__ = torch.zeros_like(UpperCamelCase_ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(UpperCamelCase_ )
def __call__( self: Tuple , UpperCamelCase_: Union[PIL.Image.Image, List[PIL.Image.Image]] , UpperCamelCase_: int = 1 , UpperCamelCase_: int = 25 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: float = 4.0 , UpperCamelCase_: int = 64 , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , ) -> Union[str, Any]:
"""simple docstring"""
if isinstance(UpperCamelCase_ , PIL.Image.Image ):
lowercase__ = 1
elif isinstance(UpperCamelCase_ , torch.Tensor ):
lowercase__ = image.shape[0]
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowercase__ = len(UpperCamelCase_ )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(UpperCamelCase_ )}' )
lowercase__ = self._execution_device
lowercase__ = batch_size * num_images_per_prompt
lowercase__ = guidance_scale > 1.0
lowercase__ = self._encode_image(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# prior
self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ )
lowercase__ = self.scheduler.timesteps
lowercase__ = self.prior.config.num_embeddings
lowercase__ = self.prior.config.embedding_dim
lowercase__ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowercase__ = latents.reshape(latents.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = self.prior(
UpperCamelCase_ , timestep=UpperCamelCase_ , proj_embedding=UpperCamelCase_ , ).predicted_image_embedding
# remove the variance
lowercase__ , lowercase__ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance is not None:
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowercase__ = self.scheduler.step(
UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=UpperCamelCase_ )
lowercase__ = []
for i, latent in enumerate(UpperCamelCase_ ):
print()
lowercase__ = self.renderer.decode(
latent[None, :] , UpperCamelCase_ , size=UpperCamelCase_ , ray_batch_size=4_096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(UpperCamelCase_ )
lowercase__ = torch.stack(UpperCamelCase_ )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
lowercase__ = images.cpu().numpy()
if output_type == "pil":
lowercase__ = [self.numpy_to_pil(UpperCamelCase_ ) for image in images]
# Offload last model to CPU
if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=UpperCamelCase_ )
| 43
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE : str = TFCamembertModel.from_pretrained('jplu/tf-camembert-base' )
SCREAMING_SNAKE_CASE : int = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowercase__ )['last_hidden_state']
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , lowercase__ )
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE : str = tf.convert_to_tensor(
[[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 179
|
'''simple docstring'''
from __future__ import annotations
def __lowerCAmelCase ( a_ , a_ = None ) -> list[list[str]]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = word_bank or []
# create a table
SCREAMING_SNAKE_CASE : int = len(a_ ) + 1
SCREAMING_SNAKE_CASE : list[list[list[str]]] = []
for _ in range(a_ ):
table.append([] )
# seed value
SCREAMING_SNAKE_CASE : List[Any] = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(a_ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(a_ )] == word:
SCREAMING_SNAKE_CASE : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(a_ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(a_ )]:
combination.reverse()
return table[len(a_ )]
if __name__ == "__main__":
print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
print(
all_construct(
"""hexagonosaurus""",
["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
)
)
| 179
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_UpperCAmelCase : List[str] = logging.get_logger(__name__)
_UpperCAmelCase : Any = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class lowerCAmelCase ( UpperCAmelCase_ ):
UpperCAmelCase__ = """table-transformer"""
UpperCAmelCase__ = ["""past_key_values"""]
UpperCAmelCase__ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : str , UpperCAmelCase : Any=True , UpperCAmelCase : str=None , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : Optional[Any]=100 , UpperCAmelCase : Optional[int]=6 , UpperCAmelCase : List[Any]=2048 , UpperCAmelCase : Optional[Any]=8 , UpperCAmelCase : Dict=6 , UpperCAmelCase : Any=2048 , UpperCAmelCase : List[Any]=8 , UpperCAmelCase : Dict=0.0 , UpperCAmelCase : Dict=0.0 , UpperCAmelCase : List[Any]=True , UpperCAmelCase : int="relu" , UpperCAmelCase : str=256 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : str=0.0 , UpperCAmelCase : List[Any]=0.0 , UpperCAmelCase : Dict=0.0_2 , UpperCAmelCase : Union[str, Any]=1.0 , UpperCAmelCase : Tuple=False , UpperCAmelCase : Any="sine" , UpperCAmelCase : Optional[int]="resnet50" , UpperCAmelCase : Dict=True , UpperCAmelCase : Dict=False , UpperCAmelCase : str=1 , UpperCAmelCase : List[str]=5 , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : Optional[int]=1 , UpperCAmelCase : Dict=1 , UpperCAmelCase : str=5 , UpperCAmelCase : List[Any]=2 , UpperCAmelCase : Optional[Any]=0.1 , **UpperCAmelCase : Optional[int] , ) -> str:
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
lowerCamelCase__ : str = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(a__ , a__ ):
lowerCamelCase__ : Optional[int] = backbone_config.get('model_type' )
lowerCamelCase__ : Any = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase__ : Union[str, Any] = config_class.from_dict(a__ )
# set timm attributes to None
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = None, None, None
lowerCamelCase__ : List[Any] = use_timm_backbone
lowerCamelCase__ : Union[str, Any] = backbone_config
lowerCamelCase__ : int = num_channels
lowerCamelCase__ : List[Any] = num_queries
lowerCamelCase__ : List[str] = d_model
lowerCamelCase__ : Union[str, Any] = encoder_ffn_dim
lowerCamelCase__ : List[Any] = encoder_layers
lowerCamelCase__ : str = encoder_attention_heads
lowerCamelCase__ : Optional[int] = decoder_ffn_dim
lowerCamelCase__ : List[str] = decoder_layers
lowerCamelCase__ : int = decoder_attention_heads
lowerCamelCase__ : List[str] = dropout
lowerCamelCase__ : Optional[int] = attention_dropout
lowerCamelCase__ : Tuple = activation_dropout
lowerCamelCase__ : List[Any] = activation_function
lowerCamelCase__ : Tuple = init_std
lowerCamelCase__ : str = init_xavier_std
lowerCamelCase__ : int = encoder_layerdrop
lowerCamelCase__ : Union[str, Any] = decoder_layerdrop
lowerCamelCase__ : Union[str, Any] = encoder_layers
lowerCamelCase__ : List[str] = auxiliary_loss
lowerCamelCase__ : List[Any] = position_embedding_type
lowerCamelCase__ : List[str] = backbone
lowerCamelCase__ : Union[str, Any] = use_pretrained_backbone
lowerCamelCase__ : str = dilation
# Hungarian matcher
lowerCamelCase__ : Dict = class_cost
lowerCamelCase__ : Optional[int] = bbox_cost
lowerCamelCase__ : str = giou_cost
# Loss coefficients
lowerCamelCase__ : List[Any] = mask_loss_coefficient
lowerCamelCase__ : Tuple = dice_loss_coefficient
lowerCamelCase__ : Dict = bbox_loss_coefficient
lowerCamelCase__ : Optional[int] = giou_loss_coefficient
lowerCamelCase__ : Optional[int] = eos_coefficient
super().__init__(is_encoder_decoder=a__ , **a__ )
@property
def A_ ( self : Dict ) -> str:
return self.encoder_attention_heads
@property
def A_ ( self : Any ) -> Optional[int]:
return self.d_model
class lowerCAmelCase ( UpperCAmelCase_ ):
UpperCAmelCase__ = version.parse("""1.11""" )
@property
def A_ ( self : Tuple ) -> int:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def A_ ( self : int ) -> Optional[int]:
return 1e-5
@property
def A_ ( self : Tuple ) -> Union[str, Any]:
return 12
| 295
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 51
| 0
|
from __future__ import annotations
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(_SCREAMING_SNAKE_CASE )
if n > 1:
factors.append(_SCREAMING_SNAKE_CASE )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 620
|
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : int = 200 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = [1, 2, 5, 10, 20, 50, 100, 200]
SCREAMING_SNAKE_CASE_ = [0] * (pence + 1)
SCREAMING_SNAKE_CASE_ = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(_SCREAMING_SNAKE_CASE , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73_682
| 620
| 1
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
__lowerCAmelCase = False
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _a ( self ):
"""simple docstring"""
return 12
@property
def _a ( self ):
"""simple docstring"""
return 12
@property
def _a ( self ):
"""simple docstring"""
return 32
@property
def _a ( self ):
"""simple docstring"""
torch.manual_seed(0 )
a_ = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def _a ( self ):
"""simple docstring"""
a_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _a ( self ):
"""simple docstring"""
torch.manual_seed(0 )
a_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(_lowercase )
@property
def _a ( self ):
"""simple docstring"""
torch.manual_seed(0 )
a_ = 12
a_ = 12
a_ = {
"attention_bias": True,
"cross_attention_dim": 32,
"attention_head_dim": height * width,
"num_attention_heads": 1,
"num_vector_embeds": self.num_embed,
"num_embeds_ada_norm": self.num_embeds_ada_norm,
"norm_num_groups": 32,
"sample_size": width,
"activation_fn": "geglu-approximate",
}
a_ = TransformeraDModel(**_lowercase )
return model
def _a ( self ):
"""simple docstring"""
a_ = "cpu"
a_ = self.dummy_vqvae
a_ = self.dummy_text_encoder
a_ = self.dummy_tokenizer
a_ = self.dummy_transformer
a_ = VQDiffusionScheduler(self.num_embed )
a_ = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowercase )
a_ = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
a_ = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
a_ = "teddy bear playing in the pool"
a_ = torch.Generator(device=_lowercase ).manual_seed(0 )
a_ = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type='np' )
a_ = output.images
a_ = torch.Generator(device=_lowercase ).manual_seed(0 )
a_ = pipe(
[prompt] , generator=_lowercase , output_type='np' , return_dict=_lowercase , num_inference_steps=2 )[0]
a_ = image[0, -3:, -3:, -1]
a_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
a_ = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self ):
"""simple docstring"""
a_ = "cpu"
a_ = self.dummy_vqvae
a_ = self.dummy_text_encoder
a_ = self.dummy_tokenizer
a_ = self.dummy_transformer
a_ = VQDiffusionScheduler(self.num_embed )
a_ = LearnedClassifierFreeSamplingEmbeddings(
learnable=_lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
a_ = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
a_ = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
a_ = "teddy bear playing in the pool"
a_ = torch.Generator(device=_lowercase ).manual_seed(0 )
a_ = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type='np' )
a_ = output.images
a_ = torch.Generator(device=_lowercase ).manual_seed(0 )
a_ = pipe(
[prompt] , generator=_lowercase , output_type='np' , return_dict=_lowercase , num_inference_steps=2 )[0]
a_ = image[0, -3:, -3:, -1]
a_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
a_ = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self ):
"""simple docstring"""
a_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
a_ = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
a_ = pipeline.to(_lowercase )
pipeline.set_progress_bar_config(disable=_lowercase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
a_ = torch.Generator(device=_lowercase ).manual_seed(0 )
a_ = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=_lowercase , output_type='np' , )
a_ = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 536
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
A = logging.get_logger(__name__)
A = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class _a ( SCREAMING_SNAKE_CASE__):
__magic_name__ = """bloom"""
__magic_name__ = ["""past_key_values"""]
__magic_name__ = {
"""num_hidden_layers""": """n_layer""",
"""num_attention_heads""": """n_head""",
}
def __init__( self : Tuple , _lowercase : List[Any]=250880 , _lowercase : int=64 , _lowercase : Optional[Any]=2 , _lowercase : Dict=8 , _lowercase : List[str]=1E-5 , _lowercase : List[Any]=0.02 , _lowercase : Optional[Any]=True , _lowercase : Dict=1 , _lowercase : Union[str, Any]=2 , _lowercase : str=False , _lowercase : List[Any]=0.0 , _lowercase : Tuple=0.0 , _lowercase : Dict=1 , _lowercase : int=False , **_lowercase : int , ) -> List[str]:
snake_case : Any = vocab_size
# Backward compatibility with n_embed kwarg
snake_case : Any = kwargs.pop("n_embed" , _lowercase )
snake_case : Tuple = hidden_size if n_embed is None else n_embed
snake_case : Optional[Any] = n_layer
snake_case : Optional[Any] = n_head
snake_case : Union[str, Any] = layer_norm_epsilon
snake_case : int = initializer_range
snake_case : int = use_cache
snake_case : int = pretraining_tp
snake_case : Tuple = apply_residual_connection_post_layernorm
snake_case : Union[str, Any] = hidden_dropout
snake_case : Optional[Any] = attention_dropout
snake_case : List[Any] = bos_token_id
snake_case : Any = eos_token_id
snake_case : Optional[Any] = slow_but_exact
super().__init__(bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
class _a ( SCREAMING_SNAKE_CASE__):
__magic_name__ = version.parse("""1.12""")
def __init__( self : int , _lowercase : PretrainedConfig , _lowercase : str = "default" , _lowercase : List[PatchingSpec] = None , _lowercase : bool = False , ) -> Dict:
super().__init__(_lowercase , task=_lowercase , patching_specs=_lowercase , use_past=_lowercase )
if not getattr(self._config , "pad_token_id" , _lowercase ):
# TODO: how to do that better?
snake_case : int = 0
@property
def __lowercase ( self : str ) -> Mapping[str, Mapping[int, str]]:
snake_case : int = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(_lowercase , direction="inputs" , inverted_values_shape=_lowercase )
snake_case : Optional[int] = {0: "batch", 1: "past_sequence + sequence"}
else:
snake_case : Dict = {0: "batch", 1: "sequence"}
return common_inputs
@property
def __lowercase ( self : int ) -> int:
return self._config.n_layer
@property
def __lowercase ( self : Dict ) -> int:
return self._config.n_head
@property
def __lowercase ( self : Union[str, Any] ) -> float:
return 1E-3
def __lowercase ( self : List[str] , _lowercase : "PreTrainedTokenizer" , _lowercase : int = -1 , _lowercase : int = -1 , _lowercase : bool = False , _lowercase : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
snake_case : int = super(_lowercase , self ).generate_dummy_inputs(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
# We need to order the input in the way they appears in the forward()
snake_case : Any = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
snake_case , snake_case : List[str] = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
snake_case : Optional[Any] = seqlen + 2
snake_case : Optional[int] = self._config.hidden_size // self.num_attention_heads
snake_case : List[str] = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
snake_case : Any = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
snake_case : Dict = [
(torch.zeros(_lowercase ), torch.zeros(_lowercase )) for _ in range(self.num_layers )
]
snake_case : Tuple = common_inputs["attention_mask"]
if self.use_past:
snake_case : Any = ordered_inputs["attention_mask"].dtype
snake_case : List[str] = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(_lowercase , _lowercase , dtype=_lowercase )] , dim=1 )
return ordered_inputs
@property
def __lowercase ( self : str ) -> int:
return 13
| 449
| 0
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase : List[Any] = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Tuple = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
__lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 721
|
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def __magic_name__ ( A : Union[str, Any] ):
'''simple docstring'''
a = fname.split(os.path.sep )[-1]
return re.search(R"^(.*)_\d+\.jpg$", A ).groups()[0]
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : Dict , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=None ) -> Tuple:
a = file_names
a = image_transform
a = label_to_id
def __len__( self : Any ) -> Tuple:
return len(self.file_names )
def __getitem__( self : List[Any] , __lowerCamelCase : List[Any] ) -> int:
a = self.file_names[idx]
a = PIL.Image.open(__lowerCamelCase )
a = raw_image.convert("RGB" )
if self.image_transform is not None:
a = self.image_transform(__lowerCamelCase )
a = extract_label(__lowerCamelCase )
if self.label_to_id is not None:
a = self.label_to_id[label]
return {"image": image, "label": label}
def __magic_name__ ( A : str, A : int ):
'''simple docstring'''
if args.with_tracking:
a = Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir )
else:
a = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a = config["lr"]
a = int(config["num_epochs"] )
a = int(config["seed"] )
a = int(config["batch_size"] )
a = config["image_size"]
if not isinstance(A, (list, tuple) ):
a = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps, "isdigit" ):
if args.checkpointing_steps == "epoch":
a = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
a = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
a = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
a = os.path.split(A )[-1].split("." )[0]
accelerator.init_trackers(A, A )
# Grab all the image filenames
a = [os.path.join(args.data_dir, A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
a = [extract_label(A ) for fname in file_names]
a = list(set(A ) )
id_to_label.sort()
a = {lbl: i for i, lbl in enumerate(A )}
# Set the seed before splitting the data.
np.random.seed(A )
torch.manual_seed(A )
torch.cuda.manual_seed_all(A )
# Split our filenames between train and validation
a = np.random.permutation(len(A ) )
a = int(0.8 * len(A ) )
a = random_perm[:cut]
a = random_perm[cut:]
# For training we use a simple RandomResizedCrop
a = Compose([RandomResizedCrop(A, scale=(0.5, 1.0) ), ToTensor()] )
a = PetsDataset(
[file_names[i] for i in train_split], image_transform=A, label_to_id=A )
# For evaluation, we use a deterministic Resize
a = Compose([Resize(A ), ToTensor()] )
a = PetsDataset([file_names[i] for i in eval_split], image_transform=A, label_to_id=A )
# Instantiate dataloaders.
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a = create_model("resnet50d", pretrained=A, num_classes=len(A ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
a = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
a = False
for param in model.get_classifier().parameters():
a = True
# We normalize the batches of images to be a bit faster.
a = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
a = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
a = torch.optim.Adam(params=model.parameters(), lr=lr / 25 )
# Instantiate learning rate scheduler
a = OneCycleLR(optimizer=A, max_lr=A, epochs=A, steps_per_epoch=len(A ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a , a , a , a , a = accelerator.prepare(
A, A, A, A, A )
# We need to keep track of how many total steps we have iterated over
a = 0
# We also need to keep track of the starting epoch so files are named properly
a = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
a = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
a = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
a = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
a = os.path.splitext(A )[0]
if "epoch" in training_difference:
a = int(training_difference.replace("epoch_", "" ) ) + 1
a = None
else:
a = int(training_difference.replace("step_", "" ) )
a = resume_step // len(A )
resume_step -= starting_epoch * len(A )
# Now we train the model
for epoch in range(A, A ):
model.train()
if args.with_tracking:
a = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
a = accelerator.skip_first_batches(A, A )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
a = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
a = model(A )
a = torch.nn.functional.cross_entropy(A, batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(A )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(A, A ):
a = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
model.eval()
a = 0
a = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
with torch.no_grad():
a = model(A )
a = outputs.argmax(dim=-1 )
a , a = accelerator.gather_for_metrics((predictions, batch["label"]) )
a = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
a = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(A ),
"epoch": epoch,
}, step=A, )
if checkpointing_steps == "epoch":
a = F"""epoch_{epoch}"""
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
if args.with_tracking:
accelerator.end_training()
def __magic_name__ ( ):
'''simple docstring'''
a = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument("--data_dir", required=A, help="The data folder on disk." )
parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training." )
parser.add_argument(
"--mixed_precision", type=A, default=A, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.", )
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps", type=A, default=A, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", )
parser.add_argument(
"--output_dir", type=A, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
parser.add_argument(
"--resume_from_checkpoint", type=A, default=A, help="If the training should continue from a checkpoint folder.", )
parser.add_argument(
"--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", )
parser.add_argument(
"--project_dir", type=A, default="logs", help="Location on where to store experiment tracking logs` and relevent project information", )
a = parser.parse_args()
a = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(A, A )
if __name__ == "__main__":
main()
| 662
| 0
|
from sklearn.metrics import fa_score
import datasets
lowercase__ : Optional[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
lowercase__ : Union[str, Any] = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
lowercase__ : Union[str, Any] = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , )
def lowerCAmelCase_ ( self , A , A , A=None , A=1 , A="binary" , A=None ) -> str:
'''simple docstring'''
a = fa_score(
A , A , labels=A , pos_label=A , average=A , sample_weight=A )
return {"f1": float(A ) if score.size == 1 else score}
| 515
|
lowercase__ : Optional[int] = 9.8_0665
def SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = g) -> float:
if fluid_density <= 0:
raise ValueError("Impossible fluid density")
if volume < 0:
raise ValueError("Impossible Object volume")
if gravity <= 0:
raise ValueError("Impossible Gravity")
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 515
| 1
|
'''simple docstring'''
def UpperCAmelCase ( lowerCamelCase_ :List[str] ):
'''simple docstring'''
snake_case_ : Tuple = len(__lowerCAmelCase )
snake_case_ : str = len(matrix[0] )
snake_case_ : Any = min(__lowerCAmelCase , __lowerCAmelCase )
for row in range(__lowerCAmelCase ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , __lowerCAmelCase ):
snake_case_ : Union[str, Any] = matrix[col][row] / matrix[row][row]
for i in range(__lowerCAmelCase , __lowerCAmelCase ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
snake_case_ : Union[str, Any] = True
for i in range(row + 1 , __lowerCAmelCase ):
if matrix[i][row] != 0:
snake_case_ : Optional[int] = matrix[i], matrix[row]
snake_case_ : Any = False
break
if reduce:
rank -= 1
for i in range(__lowerCAmelCase ):
snake_case_ : Dict = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718
|
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __UpperCamelCase ( lowercase__ ):
@staticmethod
@abstractmethod
def a__ ( _UpperCamelCase :ArgumentParser ):
raise NotImplementedError()
@abstractmethod
def a__ ( self :Optional[Any] ):
raise NotImplementedError()
| 267
| 0
|
"""simple docstring"""
from __future__ import annotations
_lowercase : Union[str, Any] = 1.6_0_2_1e-1_9 # units = C
def lowercase__ ( snake_case_ :List[str] , snake_case_ :int , snake_case_ :List[str] , ):
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
|
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
__A =2
class _snake_case :
def __init__( self , *, # begin keyword-only arguments
_lowerCamelCase="<s>" , _lowerCamelCase="<pad>" , _lowerCamelCase="</s>" , _lowerCamelCase="<unk>" , _lowerCamelCase=None , ):
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = bos, unk, pad, eos
UpperCAmelCase__ : Any = []
UpperCAmelCase__ : List[str] = []
UpperCAmelCase__ : Union[str, Any] = {}
UpperCAmelCase__ : Union[str, Any] = self.add_symbol(_lowerCamelCase)
UpperCAmelCase__ : Any = self.add_symbol(_lowerCamelCase)
UpperCAmelCase__ : List[str] = self.add_symbol(_lowerCamelCase)
UpperCAmelCase__ : str = self.add_symbol(_lowerCamelCase)
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(_lowerCamelCase)
UpperCAmelCase__ : Optional[Any] = len(self.symbols)
def __eq__( self , _lowerCamelCase):
return self.indices == other.indices
def __getitem__( self , _lowerCamelCase):
if idx < len(self.symbols):
return self.symbols[idx]
return self.unk_word
def __len__( self):
return len(self.symbols)
def __contains__( self , _lowerCamelCase):
return sym in self.indices
@classmethod
def snake_case__ ( cls , _lowerCamelCase):
UpperCAmelCase__ : Dict = cls()
d.add_from_file(_lowerCamelCase)
return d
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase=1 , _lowerCamelCase=False):
if word in self.indices and not overwrite:
UpperCAmelCase__ : Tuple = self.indices[word]
UpperCAmelCase__ : Optional[int] = self.count[idx] + n
return idx
else:
UpperCAmelCase__ : Union[str, Any] = len(self.symbols)
UpperCAmelCase__ : Optional[int] = idx
self.symbols.append(_lowerCamelCase)
self.count.append(_lowerCamelCase)
return idx
def snake_case__ ( self , _lowerCamelCase):
return 0
def snake_case__ ( self , _lowerCamelCase):
if isinstance(_lowerCamelCase , _lowerCamelCase):
try:
with open(_lowerCamelCase , """r""" , encoding="""utf-8""") as fd:
self.add_from_file(_lowerCamelCase)
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(_lowerCamelCase))
return
UpperCAmelCase__ : Optional[int] = f.readlines()
UpperCAmelCase__ : List[Any] = self._load_meta(_lowerCamelCase)
for line in lines[indices_start_line:]:
try:
UpperCAmelCase__ , UpperCAmelCase__ : int = line.rstrip().rsplit(""" """ , 1)
if field == "#fairseq:overwrite":
UpperCAmelCase__ : Optional[Any] = True
UpperCAmelCase__ , UpperCAmelCase__ : int = line.rsplit(""" """ , 1)
else:
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : int = int(_lowerCamelCase)
UpperCAmelCase__ : Any = line
if word in self and not overwrite:
raise RuntimeError(
"""Duplicate word found when loading Dictionary: '{}'. """
"""Duplicate words can overwrite earlier ones by adding the """
"""#fairseq:overwrite flag at the end of the corresponding row """
"""in the dictionary file. If using the Camembert model, please """
"""download an updated copy of the model file.""".format(_lowerCamelCase))
self.add_symbol(_lowerCamelCase , n=_lowerCamelCase , overwrite=_lowerCamelCase)
except ValueError:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt> [flags]'""")
def _UpperCamelCase ( UpperCamelCase__ ):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
UpperCAmelCase__ : Optional[Any] = dict((re.sub(R"""@@$""" , """""" , UpperCamelCase__ ), v) if k.endswith("""@@""" ) else (re.sub(R"""$""" , """</w>""" , UpperCamelCase__ ), v) for k, v in d.items() )
UpperCAmelCase__ : int = """<s> <pad> </s> <unk>""".split()
# restore the special tokens
for k in keep_keys:
del da[f'''{k}</w>''']
UpperCAmelCase__ : str = d[k] # restore
return da
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
# prep
if not os.path.exists(UpperCamelCase__ ):
raise ValueError(f'''path {biogpt_checkpoint_path} does not exist!''' )
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
print(f'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
UpperCAmelCase__ : str = os.path.join(UpperCamelCase__ , """checkpoint.pt""" )
if not os.path.isfile(UpperCamelCase__ ):
raise ValueError(f'''path to the file {checkpoint_file} does not exist!''' )
UpperCAmelCase__ : Any = torch.load(UpperCamelCase__ , map_location="""cpu""" )
UpperCAmelCase__ : Union[str, Any] = chkpt["""cfg"""]["""model"""]
# dicts
UpperCAmelCase__ : str = os.path.join(UpperCamelCase__ , """dict.txt""" )
if not os.path.isfile(UpperCamelCase__ ):
raise ValueError(f'''path to the file {dict_file} does not exist!''' )
UpperCAmelCase__ : Tuple = Dictionary.load(UpperCamelCase__ )
UpperCAmelCase__ : Union[str, Any] = rewrite_dict_keys(src_dict.indices )
UpperCAmelCase__ : Union[str, Any] = len(UpperCamelCase__ )
UpperCAmelCase__ : int = os.path.join(UpperCamelCase__ , VOCAB_FILES_NAMES["""vocab_file"""] )
print(f'''Generating {src_vocab_file} of {src_vocab_size} records''' )
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(UpperCamelCase__ , ensure_ascii=UpperCamelCase__ , indent=UpperCamelCase__ ) )
# merges_file (bpecodes)
UpperCAmelCase__ : Optional[int] = os.path.join(UpperCamelCase__ , """bpecodes""" )
if not os.path.isfile(UpperCamelCase__ ):
raise ValueError(f'''path to the file {bpecodes_file} does not exist!''' )
UpperCAmelCase__ : Dict = os.path.join(UpperCamelCase__ , VOCAB_FILES_NAMES["""merges_file"""] )
shutil.copyfile(UpperCamelCase__ , UpperCamelCase__ )
# model config
UpperCAmelCase__ : str = os.path.join(UpperCamelCase__ , """config.json""" )
UpperCAmelCase__ : Any = {
"""activation_dropout""": args["""activation_dropout"""],
"""architectures""": ["""BioGptForCausalLM"""],
"""attention_probs_dropout_prob""": args["""attention_dropout"""],
"""bos_token_id""": 0,
"""eos_token_id""": 2,
"""hidden_act""": args["""activation_fn"""],
"""hidden_dropout_prob""": args["""dropout"""],
"""hidden_size""": args["""decoder_embed_dim"""],
"""initializer_range""": 0.02,
"""intermediate_size""": args["""decoder_ffn_embed_dim"""],
"""layer_norm_eps""": 1e-12,
"""layerdrop""": args["""decoder_layerdrop"""],
"""max_position_embeddings""": args["""max_target_positions"""],
"""model_type""": """biogpt""",
"""num_attention_heads""": args["""decoder_attention_heads"""],
"""num_hidden_layers""": args["""decoder_layers"""],
"""pad_token_id""": 1,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_decoder_input_output_embed"""],
"""vocab_size""": src_vocab_size,
}
# good hparam defaults to start with
print(f'''Generating {biogpt_model_config_file}''' )
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(UpperCamelCase__ , ensure_ascii=UpperCamelCase__ , indent=UpperCamelCase__ ) )
# tokenizer config
UpperCAmelCase__ : Any = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase__ : str = {
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
"""model_max_length""": 1_0_2_4,
"""pad_token""": """<pad>""",
"""special_tokens_map_file""": None,
"""tokenizer_class""": """BioGptTokenizer""",
"""unk_token""": """<unk>""",
}
print(f'''Generating {biogpt_tokenizer_config_file}''' )
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(UpperCamelCase__ , ensure_ascii=UpperCamelCase__ , indent=UpperCamelCase__ ) )
# model
UpperCAmelCase__ : List[str] = chkpt["""model"""]
# remove unneeded keys
UpperCAmelCase__ : Dict = [
"""decoder.version""",
]
for k in ignore_keys:
model_state_dict.pop(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase__ : Tuple = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("""output_projection.weight""" ):
UpperCAmelCase__ : Union[str, Any] = model_state_dict.pop(UpperCamelCase__ )
else:
UpperCAmelCase__ : List[Any] = model_state_dict.pop(UpperCamelCase__ )
UpperCAmelCase__ : str = BioGptConfig.from_pretrained(UpperCamelCase__ )
UpperCAmelCase__ : Tuple = BioGptForCausalLM(UpperCamelCase__ )
# check that it loads ok
model_new.load_state_dict(UpperCamelCase__ )
# save
UpperCAmelCase__ : Any = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
print(f'''Generating {pytorch_weights_dump_path}''' )
torch.save(UpperCamelCase__ , UpperCamelCase__ )
print("""Conversion is done!""" )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__A =parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 407
| 0
|
'''simple docstring'''
from math import sqrt
def _lowerCAmelCase ( lowerCamelCase_ : int ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
__lowercase = True
# 0 and 1 are none primes.
if number <= 1:
__lowercase = False
for divisor in range(2 , int(round(sqrt(lowerCamelCase_ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
__lowercase = False
break
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'status' must been from type bool"
return status
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
__lowercase = list(range(2 , n + 1 ) )
__lowercase = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(lowerCamelCase_ ) ):
for j in range(i + 1 , len(lowerCamelCase_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
__lowercase = 0
# filters actual prime numbers.
__lowercase = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def _lowerCAmelCase ( lowerCamelCase_ : Any ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n > 2), "'N' must been an int and > 2"
__lowercase = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowerCamelCase_ ):
ans.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def _lowerCAmelCase ( lowerCamelCase_ : List[str] ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and number >= 0, "'number' must been an int and >= 0"
__lowercase = [] # this list will be returns of the function.
# potential prime number factors.
__lowercase = 2
__lowercase = number
if number == 0 or number == 1:
ans.append(lowerCamelCase_ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(lowerCamelCase_ ):
while quotient != 1:
if is_prime(lowerCamelCase_ ) and (quotient % factor == 0):
ans.append(lowerCamelCase_ )
quotient /= factor
else:
factor += 1
else:
ans.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def _lowerCAmelCase ( lowerCamelCase_ : Any ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
__lowercase = 0
# prime factorization of 'number'
__lowercase = prime_factorization(lowerCamelCase_ )
__lowercase = max(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type int"
return ans
def _lowerCAmelCase ( lowerCamelCase_ : List[Any] ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
__lowercase = 0
# prime factorization of 'number'
__lowercase = prime_factorization(lowerCamelCase_ )
__lowercase = min(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type int"
return ans
def _lowerCAmelCase ( lowerCamelCase_ : Tuple ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'number' must been an int"
assert isinstance(number % 2 == 0 , lowerCamelCase_ ), "compare bust been from type bool"
return number % 2 == 0
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'number' must been an int"
assert isinstance(number % 2 != 0 , lowerCamelCase_ ), "compare bust been from type bool"
return number % 2 != 0
def _lowerCAmelCase ( lowerCamelCase_ : int ):
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (number > 2) and is_even(lowerCamelCase_ )
), "'number' must been an int, even and > 2"
__lowercase = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
__lowercase = get_prime_numbers(lowerCamelCase_ )
__lowercase = len(lowerCamelCase_ )
# run variable for while-loops.
__lowercase = 0
__lowercase = None
# exit variable. for break up the loops
__lowercase = True
while i < len_pn and loop:
__lowercase = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
__lowercase = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (len(lowerCamelCase_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[str, Any] ):
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
__lowercase = 0
while numbera != 0:
__lowercase = numbera % numbera
__lowercase = numbera
__lowercase = rest
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def _lowerCAmelCase ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[Any] ):
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
__lowercase = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
__lowercase = prime_factorization(lowerCamelCase_ )
__lowercase = prime_factorization(lowerCamelCase_ )
elif numbera == 1 or numbera == 1:
__lowercase = []
__lowercase = []
__lowercase = max(lowerCamelCase_ , lowerCamelCase_ )
__lowercase = 0
__lowercase = 0
__lowercase = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
__lowercase = prime_fac_a.count(lowerCamelCase_ )
__lowercase = prime_fac_a.count(lowerCamelCase_ )
for _ in range(max(lowerCamelCase_ , lowerCamelCase_ ) ):
ans *= n
else:
__lowercase = prime_fac_a.count(lowerCamelCase_ )
for _ in range(lowerCamelCase_ ):
ans *= n
done.append(lowerCamelCase_ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
__lowercase = prime_fac_a.count(lowerCamelCase_ )
for _ in range(lowerCamelCase_ ):
ans *= n
done.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def _lowerCAmelCase ( lowerCamelCase_ : str ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'number' must been a positive int"
__lowercase = 0
__lowercase = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowerCamelCase_ ):
ans += 1
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and is_prime(
lowerCamelCase_ ), "'ans' must been a prime number and from type int"
return ans
def _lowerCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] ):
assert (
is_prime(lowerCamelCase_ ) and is_prime(lowerCamelCase_ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
__lowercase = p_number_a + 1 # jump to the next number
__lowercase = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCamelCase_ ):
number += 1
while number < p_number_a:
ans.append(lowerCamelCase_ )
number += 1
# fetch the next prime number.
while not is_prime(lowerCamelCase_ ):
number += 1
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and ans[0] != p_number_a
and ans[len(lowerCamelCase_ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def _lowerCAmelCase ( lowerCamelCase_ : Any ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 1), "'n' must been int and >= 1"
__lowercase = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowerCamelCase_ )
# precondition
assert ans[0] == 1 and ans[len(lowerCamelCase_ ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def _lowerCAmelCase ( lowerCamelCase_ : Tuple ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number > 1
), "'number' must been an int and >= 1"
__lowercase = get_divisors(lowerCamelCase_ )
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (divisors[0] == 1)
and (divisors[len(lowerCamelCase_ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def _lowerCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : List[str] ):
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
__lowercase = gcd(abs(lowerCamelCase_ ) , abs(lowerCamelCase_ ) )
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def _lowerCAmelCase ( lowerCamelCase_ : List[Any] ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'n' must been a int and >= 0"
__lowercase = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'n' must been an int and >= 0"
__lowercase = 0
__lowercase = 1
__lowercase = 1 # this will be return
for _ in range(n - 1 ):
__lowercase = ans
ans += fiba
__lowercase = tmp
return ans
| 56
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
__lowercase = dict(zip(_lowerCamelCase ,range(len(_lowerCamelCase ) ) ) )
__lowercase = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
__lowercase = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 16000,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
__lowercase = tempfile.mkdtemp()
__lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
__lowercase = os.path.join(self.tmpdirname ,_lowerCamelCase )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
with open(self.feature_extraction_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
# load decoder from hub
__lowercase = '''hf-internal-testing/ngram-beam-search-decoder'''
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> List[str]:
'''simple docstring'''
__lowercase = self.add_kwargs_tokens_map.copy()
kwargs.update(_lowerCamelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> Dict:
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.get_tokenizer()
__lowercase = self.get_feature_extractor()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
__lowercase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_lowerCamelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor ,_lowerCamelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,)
self.assertIsInstance(processor.decoder ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__lowercase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha ,5.0 )
self.assertEqual(processor.language_model.beta ,3.0 )
self.assertEqual(processor.language_model.score_boundary ,-7.0 )
self.assertEqual(processor.language_model.unk_score_offset ,3 )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(_lowerCamelCase ,'''include''' ):
WavaVecaProcessorWithLM(
tokenizer=_lowerCamelCase ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = floats_list((3, 1000) )
__lowercase = feature_extractor(_lowerCamelCase ,return_tensors='''np''' )
__lowercase = processor(_lowerCamelCase ,return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = '''This is a test string'''
__lowercase = processor(text=_lowerCamelCase )
__lowercase = tokenizer(_lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def _UpperCAmelCase (self ,_lowerCamelCase=(2, 10, 16) ,_lowerCamelCase=77 ) -> Optional[int]:
'''simple docstring'''
np.random.seed(_lowerCamelCase )
return np.random.rand(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits(shape=(10, 16) ,seed=13 )
__lowercase = processor.decode(_lowerCamelCase )
__lowercase = decoder.decode_beams(_lowerCamelCase )[0]
self.assertEqual(decoded_decoder[0] ,decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' ,decoded_processor.text )
self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__lowercase = processor.batch_decode(_lowerCamelCase )
else:
with get_context(_lowerCamelCase ).Pool() as pool:
__lowercase = processor.batch_decode(_lowerCamelCase ,_lowerCamelCase )
__lowercase = list(_lowerCamelCase )
with get_context('''fork''' ).Pool() as p:
__lowercase = decoder.decode_beams_batch(_lowerCamelCase ,_lowerCamelCase )
__lowercase , __lowercase , __lowercase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_lowerCamelCase ,decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] ,decoded_processor.text )
self.assertListEqual(_lowerCamelCase ,decoded_processor.logit_score )
self.assertListEqual(_lowerCamelCase ,decoded_processor.lm_score )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
__lowercase = 15
__lowercase = -2_0.0
__lowercase = -4.0
__lowercase = processor.batch_decode(
_lowerCamelCase ,beam_width=_lowerCamelCase ,beam_prune_logp=_lowerCamelCase ,token_min_logp=_lowerCamelCase ,)
__lowercase = decoded_processor_out.text
__lowercase = list(_lowerCamelCase )
with get_context('''fork''' ).Pool() as pool:
__lowercase = decoder.decode_beams_batch(
_lowerCamelCase ,_lowerCamelCase ,beam_width=_lowerCamelCase ,beam_prune_logp=_lowerCamelCase ,token_min_logp=_lowerCamelCase ,)
__lowercase = [d[0][0] for d in decoded_decoder_out]
__lowercase = [d[0][2] for d in decoded_decoder_out]
__lowercase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] ,_lowerCamelCase )
self.assertTrue(np.array_equal(_lowerCamelCase ,decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] ,_lowerCamelCase ,atol=1E-3 ) )
self.assertTrue(np.array_equal(_lowerCamelCase ,decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] ,_lowerCamelCase ,atol=1E-3 ) )
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
__lowercase = 2.0
__lowercase = 5.0
__lowercase = -2_0.0
__lowercase = True
__lowercase = processor.batch_decode(
_lowerCamelCase ,alpha=_lowerCamelCase ,beta=_lowerCamelCase ,unk_score_offset=_lowerCamelCase ,lm_score_boundary=_lowerCamelCase ,)
__lowercase = decoded_processor_out.text
__lowercase = list(_lowerCamelCase )
decoder.reset_params(
alpha=_lowerCamelCase ,beta=_lowerCamelCase ,unk_score_offset=_lowerCamelCase ,lm_score_boundary=_lowerCamelCase ,)
with get_context('''fork''' ).Pool() as pool:
__lowercase = decoder.decode_beams_batch(
_lowerCamelCase ,_lowerCamelCase ,)
__lowercase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] ,_lowerCamelCase )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha ,2.0 )
self.assertEqual(lm_model.beta ,5.0 )
self.assertEqual(lm_model.unk_score_offset ,-2_0.0 )
self.assertEqual(lm_model.score_boundary ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
__lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__lowercase = os.listdir(_lowerCamelCase )
__lowercase = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
__lowercase = WavaVecaProcessorWithLM.from_pretrained(_lowerCamelCase )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
__lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__lowercase = os.listdir(_lowerCamelCase )
__lowercase = os.listdir(_lowerCamelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = floats_list((3, 1000) )
__lowercase = processor_wavaveca(_lowerCamelCase ,return_tensors='''np''' )
__lowercase = processor_auto(_lowerCamelCase ,return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1E-2 )
__lowercase = self._get_dummy_logits()
__lowercase = processor_wavaveca.batch_decode(_lowerCamelCase )
__lowercase = processor_auto.batch_decode(_lowerCamelCase )
self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
self.assertListEqual(
processor.model_input_names ,feature_extractor.model_input_names ,msg='''`processor` and `feature_extractor` model input names do not match''' ,)
@staticmethod
def _UpperCAmelCase (_lowerCamelCase ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = [d[key] for d in offsets]
return retrieved_list
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = self._get_dummy_logits()[0]
__lowercase = processor.decode(_lowerCamelCase ,output_word_offsets=_lowerCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_lowerCamelCase ,_lowerCamelCase ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ) ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''start_offset''' ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''end_offset''' ) ,[1, 3, 5] )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = self._get_dummy_logits()
__lowercase = processor.batch_decode(_lowerCamelCase ,output_word_offsets=_lowerCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_lowerCamelCase ,_lowerCamelCase ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) for o in outputs['''word_offsets''']] ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''start_offset''' ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''end_offset''' ) ,[1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
import torch
__lowercase = load_dataset('''common_voice''' ,'''en''' ,split='''train''' ,streaming=_lowerCamelCase )
__lowercase = ds.cast_column('''audio''' ,datasets.Audio(sampling_rate=16000 ) )
__lowercase = iter(_lowerCamelCase )
__lowercase = next(_lowerCamelCase )
__lowercase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
__lowercase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__lowercase = processor(sample['''audio''']['''array'''] ,return_tensors='''pt''' ).input_values
with torch.no_grad():
__lowercase = model(_lowerCamelCase ).logits.cpu().numpy()
__lowercase = processor.decode(logits[0] ,output_word_offsets=_lowerCamelCase )
__lowercase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__lowercase = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
__lowercase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) ,_lowerCamelCase )
self.assertEqual(''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) ,output.text )
# output times
__lowercase = torch.tensor(self.get_from_offsets(_lowerCamelCase ,'''start_time''' ) )
__lowercase = torch.tensor(self.get_from_offsets(_lowerCamelCase ,'''end_time''' ) )
# fmt: off
__lowercase = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
__lowercase = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=0.0_1 ) )
self.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=0.0_1 ) )
| 56
| 1
|
from __future__ import annotations
import math
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
if depth < 0:
raise ValueError("""Depth cannot be less than 0""" )
if not scores:
raise ValueError("""Scores cannot be empty""" )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , minimax(depth + 1 , node_index * 2 + 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , minimax(depth + 1 , node_index * 2 + 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , )
)
def _snake_case( ) -> None:
lowercase : int = [90, 23, 6, 33, 21, 65, 123, 34_423]
lowercase : List[str] = math.log(len(SCREAMING_SNAKE_CASE__ ) , 2 )
print(f"Optimal value : {minimax(0 , 0 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 336
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase : str = {
"""configuration_blenderbot_small""": [
"""BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotSmallConfig""",
"""BlenderbotSmallOnnxConfig""",
],
"""tokenization_blenderbot_small""": ["""BlenderbotSmallTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Any = ["""BlenderbotSmallTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : str = [
"""BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotSmallForCausalLM""",
"""BlenderbotSmallForConditionalGeneration""",
"""BlenderbotSmallModel""",
"""BlenderbotSmallPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : str = [
"""TFBlenderbotSmallForConditionalGeneration""",
"""TFBlenderbotSmallModel""",
"""TFBlenderbotSmallPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Dict = [
"""FlaxBlenderbotSmallForConditionalGeneration""",
"""FlaxBlenderbotSmallModel""",
"""FlaxBlenderbotSmallPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
lowercase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 336
| 1
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
def __init__( self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=30 , __magic_name__=400 , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=[0.5, 0.5, 0.5] , __magic_name__=[0.5, 0.5, 0.5] , __magic_name__=True , __magic_name__=1 / 255 , __magic_name__=True , ):
"""simple docstring"""
A_ : Any = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
A_ : Tuple = parent
A_ : List[Any] = batch_size
A_ : str = num_channels
A_ : int = min_resolution
A_ : Tuple = max_resolution
A_ : int = do_resize
A_ : Optional[Any] = size
A_ : int = do_normalize
A_ : List[str] = image_mean
A_ : Optional[int] = image_std
A_ : List[str] = do_rescale
A_ : Dict = rescale_factor
A_ : Optional[int] = do_pad
def UpperCAmelCase ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCAmelCase ( self , __magic_name__ , __magic_name__=False ):
"""simple docstring"""
if not batched:
A_ : List[Any] = image_inputs[0]
if isinstance(__magic_name__ , Image.Image ):
A_ , A_ : List[Any] = image.size
else:
A_ , A_ : Any = image.shape[1], image.shape[2]
if w < h:
A_ : Tuple = int(self.size['''shortest_edge'''] * h / w )
A_ : int = self.size['''shortest_edge''']
elif w > h:
A_ : Union[str, Any] = self.size['''shortest_edge''']
A_ : List[str] = int(self.size['''shortest_edge'''] * w / h )
else:
A_ : Any = self.size['''shortest_edge''']
A_ : Tuple = self.size['''shortest_edge''']
else:
A_ : Dict = []
for image in image_inputs:
A_ , A_ : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A_ : Optional[int] = max(__magic_name__ , key=lambda __magic_name__ : item[0] )[0]
A_ : int = max(__magic_name__ , key=lambda __magic_name__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __UpperCAmelCase( __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = DetaImageProcessor if is_vision_available() else None
def UpperCAmelCase ( self ):
"""simple docstring"""
A_ : str = DetaImageProcessingTester(self )
@property
def UpperCAmelCase ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase ( self ):
"""simple docstring"""
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , '''image_mean''' ) )
self.assertTrue(hasattr(__magic_name__ , '''image_std''' ) )
self.assertTrue(hasattr(__magic_name__ , '''do_normalize''' ) )
self.assertTrue(hasattr(__magic_name__ , '''do_resize''' ) )
self.assertTrue(hasattr(__magic_name__ , '''do_rescale''' ) )
self.assertTrue(hasattr(__magic_name__ , '''do_pad''' ) )
self.assertTrue(hasattr(__magic_name__ , '''size''' ) )
def UpperCAmelCase ( self ):
"""simple docstring"""
A_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , __magic_name__ )
def UpperCAmelCase ( self ):
"""simple docstring"""
pass
def UpperCAmelCase ( self ):
"""simple docstring"""
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
A_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
A_ , A_ : int = self.image_processor_tester.get_expected_values(__magic_name__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ , A_ : Dict = self.image_processor_tester.get_expected_values(__magic_name__ , batched=__magic_name__ )
A_ : str = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase ( self ):
"""simple docstring"""
A_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
A_ , A_ : Optional[int] = self.image_processor_tester.get_expected_values(__magic_name__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : Union[str, Any] = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values
A_ , A_ : Dict = self.image_processor_tester.get_expected_values(__magic_name__ , batched=__magic_name__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase ( self ):
"""simple docstring"""
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input
A_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
A_ , A_ : List[str] = self.image_processor_tester.get_expected_values(__magic_name__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : int = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values
A_ , A_ : Optional[int] = self.image_processor_tester.get_expected_values(__magic_name__ , batched=__magic_name__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCAmelCase ( self ):
"""simple docstring"""
A_ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
A_ : Dict = json.loads(f.read() )
A_ : Optional[Any] = {'''image_id''': 3_9769, '''annotations''': target}
# encode them
A_ : List[str] = DetaImageProcessor()
A_ : List[str] = image_processing(images=__magic_name__ , annotations=__magic_name__ , return_tensors='''pt''' )
# verify pixel values
A_ : Optional[Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , __magic_name__ )
A_ : Any = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __magic_name__ , atol=1e-4 ) )
# verify area
A_ : List[str] = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __magic_name__ ) )
# verify boxes
A_ : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __magic_name__ )
A_ : List[str] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __magic_name__ , atol=1e-3 ) )
# verify image_id
A_ : Optional[Any] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __magic_name__ ) )
# verify is_crowd
A_ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __magic_name__ ) )
# verify class_labels
A_ : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __magic_name__ ) )
# verify orig_size
A_ : Dict = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __magic_name__ ) )
# verify size
A_ : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __magic_name__ ) )
@slow
def UpperCAmelCase ( self ):
"""simple docstring"""
A_ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
A_ : Dict = json.loads(f.read() )
A_ : int = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9769, '''segments_info''': target}
A_ : Any = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
A_ : List[str] = DetaImageProcessor(format='''coco_panoptic''' )
A_ : Optional[Any] = image_processing(images=__magic_name__ , annotations=__magic_name__ , masks_path=__magic_name__ , return_tensors='''pt''' )
# verify pixel values
A_ : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , __magic_name__ )
A_ : Any = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __magic_name__ , atol=1e-4 ) )
# verify area
A_ : Any = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __magic_name__ ) )
# verify boxes
A_ : List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __magic_name__ )
A_ : Union[str, Any] = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __magic_name__ , atol=1e-3 ) )
# verify image_id
A_ : Union[str, Any] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __magic_name__ ) )
# verify is_crowd
A_ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __magic_name__ ) )
# verify class_labels
A_ : Any = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __magic_name__ ) )
# verify masks
A_ : Optional[int] = 82_2873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __magic_name__ )
# verify orig_size
A_ : Dict = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __magic_name__ ) )
# verify size
A_ : List[str] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __magic_name__ ) )
| 717
|
from math import loga
def a__ ( a ) -> int:
if a < 0:
raise ValueError('''Input value must be a positive integer''' )
elif isinstance(a , a ):
raise TypeError('''Input value must be a \'int\' type''' )
return 0 if (a == 0) else int(loga(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 236
| 0
|
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""",
}
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : List[Any] ="t5"
__A : List[Any] =["past_key_values"]
__A : int ={"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self ,_snake_case=3_21_28 ,_snake_case=5_12 ,_snake_case=64 ,_snake_case=20_48 ,_snake_case=6 ,_snake_case=None ,_snake_case=8 ,_snake_case=32 ,_snake_case=1_28 ,_snake_case=0.1 ,_snake_case=1E-6 ,_snake_case=1.0 ,_snake_case="relu" ,_snake_case=True ,_snake_case=True ,_snake_case=0 ,_snake_case=1 ,**_snake_case ,):
UpperCAmelCase_ : int = vocab_size
UpperCAmelCase_ : str = d_model
UpperCAmelCase_ : Any = d_kv
UpperCAmelCase_ : Any = d_ff
UpperCAmelCase_ : str = num_layers
UpperCAmelCase_ : List[Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
UpperCAmelCase_ : Optional[int] = num_heads
UpperCAmelCase_ : List[str] = relative_attention_num_buckets
UpperCAmelCase_ : int = relative_attention_max_distance
UpperCAmelCase_ : Union[str, Any] = dropout_rate
UpperCAmelCase_ : Optional[Any] = layer_norm_epsilon
UpperCAmelCase_ : Any = initializer_factor
UpperCAmelCase_ : List[Any] = feed_forward_proj
UpperCAmelCase_ : List[str] = use_cache
UpperCAmelCase_ : Any = self.feed_forward_proj.split("-" )
UpperCAmelCase_ : Union[str, Any] = act_info[-1]
UpperCAmelCase_ : str = act_info[0] == "gated"
if len(_snake_case ) > 1 and act_info[0] != "gated" or len(_snake_case ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
UpperCAmelCase_ : Any = "gelu_new"
super().__init__(
pad_token_id=_snake_case ,eos_token_id=_snake_case ,is_encoder_decoder=_snake_case ,**_snake_case ,)
class _snake_case (__SCREAMING_SNAKE_CASE):
@property
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
UpperCAmelCase_ : Union[str, Any] = "past_encoder_sequence + sequence"
UpperCAmelCase_ : Any = {0: "batch"}
UpperCAmelCase_ : Dict = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
UpperCAmelCase_ : List[Any] = {0: "batch", 1: "decoder_sequence"}
UpperCAmelCase_ : List[Any] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(_snake_case ,direction="inputs" )
return common_inputs
@property
def UpperCamelCase__ ( self ):
return 13
| 71
|
from collections.abc import Generator
from math import sin
def _A ( _lowercase ) -> bytes:
"""simple docstring"""
if len(_lowercase ) != 32:
raise ValueError('Input must be of length 32' )
__UpperCamelCase = B''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def _A ( _lowercase ) -> bytes:
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
__UpperCamelCase = format(_lowercase , '08x' )[-8:]
__UpperCamelCase = B''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
return little_endian_hex
def _A ( _lowercase ) -> bytes:
"""simple docstring"""
__UpperCamelCase = B''
for char in message:
bit_string += format(_lowercase , '08b' ).encode('utf-8' )
__UpperCamelCase = format(len(_lowercase ) , '064b' ).encode('utf-8' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(_lowercase ) % 5_12 != 4_48:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def _A ( _lowercase ) -> Generator[list[int], None, None]:
"""simple docstring"""
if len(_lowercase ) % 5_12 != 0:
raise ValueError('Input must have length that\'s a multiple of 512' )
for pos in range(0 , len(_lowercase ) , 5_12 ):
__UpperCamelCase = bit_string[pos : pos + 5_12]
__UpperCamelCase = []
for i in range(0 , 5_12 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def _A ( _lowercase ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
__UpperCamelCase = format(_lowercase , '032b' )
__UpperCamelCase = ''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(_lowercase , 2 )
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
return (a + b) % 2**32
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
if shift < 0:
raise ValueError('Shift must be non-negative' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def _A ( _lowercase ) -> bytes:
"""simple docstring"""
__UpperCamelCase = preprocess(_lowercase )
__UpperCamelCase = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
__UpperCamelCase = 0X67_45_23_01
__UpperCamelCase = 0Xef_cd_ab_89
__UpperCamelCase = 0X98_ba_dc_fe
__UpperCamelCase = 0X10_32_54_76
__UpperCamelCase = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(_lowercase ):
__UpperCamelCase = aa
__UpperCamelCase = ba
__UpperCamelCase = ca
__UpperCamelCase = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
__UpperCamelCase = d ^ (b & (c ^ d))
__UpperCamelCase = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
__UpperCamelCase = c ^ (d & (b ^ c))
__UpperCamelCase = (5 * i + 1) % 16
elif i <= 47:
__UpperCamelCase = b ^ c ^ d
__UpperCamelCase = (3 * i + 5) % 16
else:
__UpperCamelCase = c ^ (b | not_aa(_lowercase ))
__UpperCamelCase = (7 * i) % 16
__UpperCamelCase = (f + a + added_consts[i] + block_words[g]) % 2**32
__UpperCamelCase = d
__UpperCamelCase = c
__UpperCamelCase = b
__UpperCamelCase = sum_aa(_lowercase , left_rotate_aa(_lowercase , shift_amounts[i] ) )
# Add hashed chunk to running total
__UpperCamelCase = sum_aa(_lowercase , _lowercase )
__UpperCamelCase = sum_aa(_lowercase , _lowercase )
__UpperCamelCase = sum_aa(_lowercase , _lowercase )
__UpperCamelCase = sum_aa(_lowercase , _lowercase )
__UpperCamelCase = reformat_hex(_lowercase ) + reformat_hex(_lowercase ) + reformat_hex(_lowercase ) + reformat_hex(_lowercase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1
| 0
|
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def A_ ( ):
"""simple docstring"""
_a = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'''
) )
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''', type=_lowerCAmelCase, default=1, help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
'''training_script''', type=_lowerCAmelCase, help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
), )
# rest from the training program
parser.add_argument('''training_script_args''', nargs=_lowerCAmelCase )
return parser.parse_args()
def A_ ( ):
"""simple docstring"""
_a = parse_args()
# Import training_script as a module.
_a = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
_a = script_fpath.stem
_a = importlib.import_module(_lowerCAmelCase )
# Patch sys.argv
_a = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 709
|
"""simple docstring"""
def A_ ( _lowerCAmelCase : int = 10_00 ):
"""simple docstring"""
_a , _a = 1, 1
_a = 2
while True:
_a = 0
_a = fa + fa
_a , _a = fa, f
index += 1
for _ in str(_lowerCAmelCase ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 285
| 0
|
import torch
from transformers import AutoModel
class UpperCAmelCase ( torch.nn.Module ):
def __init__( self: Optional[int] , __UpperCamelCase: Tuple="sayef/fsner-bert-base-uncased" ):
super(__UpperCamelCase , self ).__init__()
_a = AutoModel.from_pretrained(__UpperCamelCase , return_dict=__UpperCamelCase )
_a = torch.nn.CosineSimilarity(3 , 1E-08 )
_a = torch.nn.Softmax(dim=1 )
def _A ( self: str , **__UpperCamelCase: Optional[Any] ):
return self.bert(**__UpperCamelCase ).last_hidden_state
def _A ( self: List[Any] , __UpperCamelCase: int ):
return token_embeddings.sum(2 , keepdim=__UpperCamelCase )
def _A ( self: Union[str, Any] , __UpperCamelCase: List[Any] , __UpperCamelCase: Union[str, Any] , __UpperCamelCase: List[str]=1 ):
return self.softmax(T * self.cos(__UpperCamelCase , __UpperCamelCase ) )
def _A ( self: Optional[Any] , __UpperCamelCase: Union[str, Any] , __UpperCamelCase: Tuple ):
_a = W_supports['''sizes'''].tolist()
_a = W_supports['''start_token_id'''].item()
_a = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
_a = self.BERT(**__UpperCamelCase )
_a = self.BERT(**__UpperCamelCase )
_a = None
_a = None
_a = W_supports['''input_ids'''] == start_token_id
_a = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(__UpperCamelCase ):
if i == 0:
_a = 0
else:
_a = support_sizes[i - 1]
_a = S[s : s + size][start_token_masks[s : s + size]]
_a = S[s : s + size][end_token_masks[s : s + size]]
_a = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
_a = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
_a = torch.vstack((p_starts, p_start) )
_a = torch.vstack((p_ends, p_end) )
else:
_a = p_start
_a = p_end
return p_starts, p_ends
| 487
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class UpperCAmelCase :
def __init__( self: List[str] , __UpperCamelCase: Optional[int] , ):
_a = parent
_a = 13
_a = 7
_a = True
_a = True
_a = True
_a = True
_a = True
_a = False
_a = False
_a = False
_a = 2
_a = 99
_a = 0
_a = 32
_a = 2
_a = 4
_a = 0.1
_a = 0.1
_a = 512
_a = 16
_a = 2
_a = 0.0_2
_a = 3
_a = 4
_a = '''last'''
_a = True
_a = None
_a = 0
def _A ( self: Any ):
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
_a = None
if self.use_input_lengths:
_a = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_a = None
_a = None
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
_a = ids_tensor([self.batch_size] , self.num_choices )
_a = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _A ( self: List[str] , __UpperCamelCase: Any , __UpperCamelCase: List[Any] , __UpperCamelCase: List[str] , __UpperCamelCase: Optional[int] , __UpperCamelCase: Any , __UpperCamelCase: Optional[Any] , __UpperCamelCase: Optional[int] , __UpperCamelCase: Optional[Any] , __UpperCamelCase: Tuple , ):
_a = TFFlaubertModel(config=__UpperCamelCase )
_a = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
_a = model(__UpperCamelCase )
_a = [input_ids, input_mask]
_a = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A ( self: Dict , __UpperCamelCase: Any , __UpperCamelCase: Any , __UpperCamelCase: str , __UpperCamelCase: List[str] , __UpperCamelCase: Any , __UpperCamelCase: Union[str, Any] , __UpperCamelCase: int , __UpperCamelCase: List[Any] , __UpperCamelCase: Dict , ):
_a = TFFlaubertWithLMHeadModel(__UpperCamelCase )
_a = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
_a = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A ( self: Tuple , __UpperCamelCase: Optional[int] , __UpperCamelCase: int , __UpperCamelCase: Optional[Any] , __UpperCamelCase: str , __UpperCamelCase: int , __UpperCamelCase: str , __UpperCamelCase: Dict , __UpperCamelCase: Union[str, Any] , __UpperCamelCase: List[Any] , ):
_a = TFFlaubertForQuestionAnsweringSimple(__UpperCamelCase )
_a = {'''input_ids''': input_ids, '''lengths''': input_lengths}
_a = model(__UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A ( self: Dict , __UpperCamelCase: Optional[Any] , __UpperCamelCase: Any , __UpperCamelCase: Any , __UpperCamelCase: Any , __UpperCamelCase: Dict , __UpperCamelCase: Dict , __UpperCamelCase: str , __UpperCamelCase: Any , __UpperCamelCase: str , ):
_a = TFFlaubertForSequenceClassification(__UpperCamelCase )
_a = {'''input_ids''': input_ids, '''lengths''': input_lengths}
_a = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _A ( self: Optional[Any] , __UpperCamelCase: List[Any] , __UpperCamelCase: Tuple , __UpperCamelCase: Optional[Any] , __UpperCamelCase: int , __UpperCamelCase: Tuple , __UpperCamelCase: Dict , __UpperCamelCase: str , __UpperCamelCase: Optional[Any] , __UpperCamelCase: Union[str, Any] , ):
_a = self.num_labels
_a = TFFlaubertForTokenClassification(config=__UpperCamelCase )
_a = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_a = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A ( self: Optional[int] , __UpperCamelCase: Any , __UpperCamelCase: Optional[int] , __UpperCamelCase: List[Any] , __UpperCamelCase: Optional[Any] , __UpperCamelCase: Dict , __UpperCamelCase: List[str] , __UpperCamelCase: Tuple , __UpperCamelCase: str , __UpperCamelCase: str , ):
_a = self.num_choices
_a = TFFlaubertForMultipleChoice(config=__UpperCamelCase )
_a = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
_a = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
_a = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
_a = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
_a = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _A ( self: Dict ):
_a = self.prepare_config_and_inputs()
(
(
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) ,
) = config_and_inputs
_a = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''langs''': token_type_ids,
'''lengths''': input_lengths,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase ( __snake_case , __snake_case , unittest.TestCase ):
a: List[str] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
a: List[Any] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
a: List[Any] = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
a: List[Any] = False
a: Dict = False
def _A ( self: List[str] , __UpperCamelCase: Optional[int] , __UpperCamelCase: Optional[int] , __UpperCamelCase: Optional[int] , __UpperCamelCase: List[str] , __UpperCamelCase: int ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _A ( self: Union[str, Any] ):
_a = TFFlaubertModelTester(self )
_a = ConfigTester(self , config_class=__UpperCamelCase , emb_dim=37 )
def _A ( self: str ):
self.config_tester.run_common_tests()
def _A ( self: Optional[int] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__UpperCamelCase )
def _A ( self: Optional[int] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__UpperCamelCase )
def _A ( self: int ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__UpperCamelCase )
def _A ( self: str ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__UpperCamelCase )
def _A ( self: int ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*__UpperCamelCase )
def _A ( self: str ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*__UpperCamelCase )
@slow
def _A ( self: List[Any] ):
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = TFFlaubertModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
@slow
def _A ( self: Any ):
_a = TFFlaubertModel.from_pretrained('''jplu/tf-flaubert-small-cased''' )
_a = tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
_a = model(__UpperCamelCase )[0]
_a = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , __UpperCamelCase )
# compare the actual values for a slice.
_a = tf.convert_to_tensor(
[
[
[-1.8_7_6_8_7_7_3, -1.5_6_6_5_5_5, 0.2_7_0_7_2_4_1_8],
[-1.6_9_2_0_0_3_8, -0.5_8_7_3_5_0_5, 1.9_3_2_9_5_9_9],
[-2.9_5_6_3_9_8_5, -1.6_9_9_3_8_3_5, 1.7_9_7_2_0_5_2],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 487
| 1
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
UpperCamelCase = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def __lowerCamelCase ( snake_case__ ) -> Tuple:
"""simple docstring"""
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(snake_case__ )
def __lowerCamelCase ( snake_case__ ) -> Dict:
"""simple docstring"""
from transformers.testing_utils import pytest_terminal_summary_main
_SCREAMING_SNAKE_CASE = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(snake_case__ ,id=snake_case__ )
| 715
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase )
class __UpperCAmelCase (_UpperCAmelCase ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
__snake_case : str = field(default="text-classification" ,metadata={"include_in_asdict_even_if_is_default": True} )
__snake_case : ClassVar[Features] = Features({"text": Value("string" )} )
__snake_case : ClassVar[Features] = Features({"labels": ClassLabel} )
__snake_case : str = "text"
__snake_case : str = "labels"
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: Optional[int] ):
'''simple docstring'''
if self.label_column not in features:
raise ValueError(F'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , UpperCAmelCase_ ):
raise ValueError(F'Column {self.label_column} is not a ClassLabel.' )
_SCREAMING_SNAKE_CASE = copy.deepcopy(self )
_SCREAMING_SNAKE_CASE = self.label_schema.copy()
_SCREAMING_SNAKE_CASE = features[self.label_column]
_SCREAMING_SNAKE_CASE = label_schema
return task_template
@property
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
return {
self.text_column: "text",
self.label_column: "labels",
}
| 569
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
"xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
"xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
"xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
"xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
"xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
"xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
"xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
"xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
"xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
"xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class _A ( __lowercase ):
lowercase__: int = '''xlm'''
lowercase__: Dict = {
'''hidden_size''': '''emb_dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
'''n_words''': '''vocab_size''', # For backward compatibility
}
def __init__( self : str , __magic_name__ : str=3_01_45 , __magic_name__ : str=20_48 , __magic_name__ : Dict=12 , __magic_name__ : List[Any]=16 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : List[Any]=0.1 , __magic_name__ : Optional[Any]=True , __magic_name__ : Tuple=False , __magic_name__ : List[Any]=False , __magic_name__ : List[str]=False , __magic_name__ : List[Any]=1 , __magic_name__ : int=True , __magic_name__ : List[Any]=5_12 , __magic_name__ : List[Any]=20_48**-0.5 , __magic_name__ : Tuple=1E-12 , __magic_name__ : List[str]=0.02 , __magic_name__ : List[str]=0 , __magic_name__ : str=1 , __magic_name__ : Union[str, Any]=2 , __magic_name__ : List[Any]=3 , __magic_name__ : Union[str, Any]=5 , __magic_name__ : List[str]=True , __magic_name__ : Dict="first" , __magic_name__ : str=True , __magic_name__ : Optional[Any]=None , __magic_name__ : Optional[Any]=True , __magic_name__ : List[str]=0.1 , __magic_name__ : List[str]=5 , __magic_name__ : List[Any]=5 , __magic_name__ : int=0 , __magic_name__ : int=0 , __magic_name__ : List[str]=2 , __magic_name__ : int=0 , **__magic_name__ : Any , ) -> List[Any]:
"""simple docstring"""
__snake_case : List[Any] = vocab_size
__snake_case : List[Any] = emb_dim
__snake_case : Tuple = n_layers
__snake_case : Union[str, Any] = n_heads
__snake_case : Optional[int] = dropout
__snake_case : Dict = attention_dropout
__snake_case : Tuple = gelu_activation
__snake_case : Any = sinusoidal_embeddings
__snake_case : List[Any] = causal
__snake_case : Dict = asm
__snake_case : Optional[Any] = n_langs
__snake_case : Any = use_lang_emb
__snake_case : int = layer_norm_eps
__snake_case : List[Any] = bos_index
__snake_case : Any = eos_index
__snake_case : Union[str, Any] = pad_index
__snake_case : Optional[Any] = unk_index
__snake_case : List[Any] = mask_index
__snake_case : Union[str, Any] = is_encoder
__snake_case : Optional[int] = max_position_embeddings
__snake_case : int = embed_init_std
__snake_case : Dict = init_std
__snake_case : Any = summary_type
__snake_case : int = summary_use_proj
__snake_case : Any = summary_activation
__snake_case : Any = summary_proj_to_labels
__snake_case : List[str] = summary_first_dropout
__snake_case : List[Any] = start_n_top
__snake_case : List[Any] = end_n_top
__snake_case : str = mask_token_id
__snake_case : List[Any] = lang_id
if "n_words" in kwargs:
__snake_case : int = kwargs["""n_words"""]
super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , **__magic_name__ )
class _A ( __lowercase ):
@property
def lowercase__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
__snake_case : str = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__snake_case : Dict = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 26
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''tanreinama/GPTSAN-2.8B-spout_is_uniform''': (
'''https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'''
),
}
class _SCREAMING_SNAKE_CASE ( UpperCAmelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] = "gptsan-japanese"
SCREAMING_SNAKE_CASE_: Any = [
"past_key_values",
]
SCREAMING_SNAKE_CASE_: List[Any] = {
"hidden_size": "d_model",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : int , UpperCAmelCase_ : Dict=36_000 , UpperCAmelCase_ : Any=1_280 , UpperCAmelCase_ : Optional[int]=1_024 , UpperCAmelCase_ : str=8_192 , UpperCAmelCase_ : int=4_096 , UpperCAmelCase_ : Optional[int]=128 , UpperCAmelCase_ : int=10 , UpperCAmelCase_ : List[Any]=0 , UpperCAmelCase_ : Any=16 , UpperCAmelCase_ : int=16 , UpperCAmelCase_ : str=128 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : Tuple=1E-5 , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : Union[str, Any]="float32" , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Dict=0.002 , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : str=35_998 , UpperCAmelCase_ : str=35_995 , UpperCAmelCase_ : Optional[int]=35_999 , **UpperCAmelCase_ : int , ) -> int:
"""simple docstring"""
_lowerCAmelCase = vocab_size
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = d_model
_lowerCAmelCase = d_ff
_lowerCAmelCase = d_ext
_lowerCAmelCase = d_spout
_lowerCAmelCase = num_switch_layers
_lowerCAmelCase = num_ext_layers
_lowerCAmelCase = num_switch_layers + num_ext_layers
_lowerCAmelCase = num_heads
_lowerCAmelCase = num_experts
_lowerCAmelCase = expert_capacity
_lowerCAmelCase = dropout_rate
_lowerCAmelCase = layer_norm_epsilon
_lowerCAmelCase = router_bias
_lowerCAmelCase = router_jitter_noise
_lowerCAmelCase = router_dtype
_lowerCAmelCase = router_ignore_padding_tokens
_lowerCAmelCase = output_hidden_states
_lowerCAmelCase = output_attentions
_lowerCAmelCase = initializer_factor
_lowerCAmelCase = output_router_logits
_lowerCAmelCase = use_cache
super().__init__(
separator_token_id=UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , )
| 580
| 0
|
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class a__ :
def __init__( self, _UpperCAmelCase=None, _UpperCAmelCase=None ):
'''simple docstring'''
lowercase__ = list(poly_a or [0] )[:]
lowercase__ = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
lowercase__ = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
lowercase__ = len(self.polyB )
# Add 0 to make lengths equal a power of 2
lowercase__ = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
lowercase__ = complex(mpmath.root(x=1, n=self.c_max_length, k=1 ) )
# The product
lowercase__ = self.__multiply()
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
# Corner case
if len(_UpperCAmelCase ) <= 1:
return dft[0]
#
lowercase__ = self.c_max_length // 2
while next_ncol > 0:
lowercase__ = [[] for i in range(_UpperCAmelCase )]
lowercase__ = self.root**next_ncol
# First half of next step
lowercase__ = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(_UpperCAmelCase ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
lowercase__ = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(_UpperCAmelCase ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
lowercase__ = new_dft
lowercase__ = next_ncol // 2
return dft[0]
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.__dft("A" )
lowercase__ = self.__dft("B" )
lowercase__ = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
lowercase__ = 2
while next_ncol <= self.c_max_length:
lowercase__ = [[] for i in range(_UpperCAmelCase )]
lowercase__ = self.root ** (next_ncol // 2)
lowercase__ = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
lowercase__ = new_inverse_c
next_ncol *= 2
# Unpack
lowercase__ = [round(x[0].real, 8 ) + round(x[0].imag, 8 ) * 1J for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self ):
'''simple docstring'''
lowercase__ = "A = " + " + ".join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) )
lowercase__ = "B = " + " + ".join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) )
lowercase__ = "A*B = " + " + ".join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) )
return F'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 668
|
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase_: List[str] = 1_6
lowerCAmelCase_: Optional[Any] = 3_2
def __a ( A , A = 16 , A = "bert-base-cased" ):
'''simple docstring'''
lowercase__ = AutoTokenizer.from_pretrained(A )
lowercase__ = load_dataset("glue" , "mrpc" )
def tokenize_function(A ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=A , max_length=A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ = datasets.map(
A , batched=A , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=A )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(A ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A , padding="max_length" , max_length=1_28 , return_tensors="pt" )
return tokenizer.pad(A , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
lowercase__ = DataLoader(
tokenized_datasets["train"] , shuffle=A , collate_fn=A , batch_size=A )
lowercase__ = DataLoader(
tokenized_datasets["validation"] , shuffle=A , collate_fn=A , batch_size=A )
return train_dataloader, eval_dataloader
def __a ( A , A ):
'''simple docstring'''
lowercase__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ = config["lr"]
lowercase__ = int(config["num_epochs"] )
lowercase__ = int(config["seed"] )
lowercase__ = int(config["batch_size"] )
lowercase__ = args.model_name_or_path
set_seed(A )
lowercase__ , lowercase__ = get_dataloaders(A , A , A )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ = AutoModelForSequenceClassification.from_pretrained(A , return_dict=A )
# Instantiate optimizer
lowercase__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase__ = optimizer_cls(params=model.parameters() , lr=A )
if accelerator.state.deepspeed_plugin is not None:
lowercase__ = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
lowercase__ = 1
lowercase__ = (len(A ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase__ = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=0 , num_training_steps=A , )
else:
lowercase__ = DummyScheduler(A , total_num_steps=A , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
A , A , A , A , A )
# We need to keep track of how many total steps we have iterated over
lowercase__ = 0
# We also need to keep track of the stating epoch so files are named properly
lowercase__ = 0
# Now we train the model
lowercase__ = evaluate.load("glue" , "mrpc" )
lowercase__ = 0
lowercase__ = {}
for epoch in range(A , A ):
model.train()
for step, batch in enumerate(A ):
lowercase__ = model(**A )
lowercase__ = outputs.loss
lowercase__ = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
lowercase__ = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ = model(**A )
lowercase__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowercase__ , lowercase__ = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(A ) - 1:
lowercase__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowercase__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=A , references=A , )
lowercase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , A )
lowercase__ = eval_metric["accuracy"]
if best_performance < eval_metric["accuracy"]:
lowercase__ = eval_metric["accuracy"]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f:
json.dump(A , A )
def __a ( ):
'''simple docstring'''
lowercase__ = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=A , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=A , )
parser.add_argument(
"--output_dir" , type=A , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--performance_lower_bound" , type=A , default=A , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , )
parser.add_argument(
"--num_epochs" , type=A , default=3 , help="Number of train epochs." , )
lowercase__ = parser.parse_args()
lowercase__ = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(A , A )
if __name__ == "__main__":
main()
| 668
| 1
|
def UpperCamelCase ( snake_case__ : Optional[Any] ):
'''simple docstring'''
def merge(snake_case__ : Optional[int] ,snake_case__ : Optional[Any] ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(snake_case__ ) <= 1:
return collection
__snake_case :Tuple = len(snake_case__ ) // 2
return merge(merge_sort(collection[:mid] ) ,merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowerCamelCase__ = [int(item) for item in user_input.split(""",""")]
print(*merge_sort(unsorted), sep=""",""")
| 455
|
def __magic_name__ ( lowercase = 1000 ) -> int:
"""simple docstring"""
lowercase_ , lowercase_ : Optional[Any] = 1, 1
lowercase_ : Tuple = 2
while True:
lowercase_ : Dict = 0
lowercase_ : List[Any] = fa + fa
lowercase_ , lowercase_ : List[str] = fa, f
index += 1
for _ in str(lowercase ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 458
| 0
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class lowercase_ ( lowerCAmelCase__ ):
__UpperCamelCase = "Speech2TextFeatureExtractor"
__UpperCamelCase = "Speech2TextTokenizer"
def __init__( self: Any, _lowercase: Dict, _lowercase: Any):
'''simple docstring'''
super().__init__(_lowercase, _lowercase)
__lowerCAmelCase = self.feature_extractor
__lowerCAmelCase = False
def __call__( self: Union[str, Any], *_lowercase: Optional[Any], **_lowercase: Dict):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*_lowercase, **_lowercase)
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""")
__lowerCAmelCase = kwargs.pop("""raw_speech""")
else:
__lowerCAmelCase = kwargs.pop("""audio""", _lowercase)
__lowerCAmelCase = kwargs.pop("""sampling_rate""", _lowercase)
__lowerCAmelCase = kwargs.pop("""text""", _lowercase)
if len(_lowercase) > 0:
__lowerCAmelCase = args[0]
__lowerCAmelCase = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""")
if audio is not None:
__lowerCAmelCase = self.feature_extractor(_lowercase, *_lowercase, sampling_rate=_lowercase, **_lowercase)
if text is not None:
__lowerCAmelCase = self.tokenizer(_lowercase, **_lowercase)
if text is None:
return inputs
elif audio is None:
return encodings
else:
__lowerCAmelCase = encodings["""input_ids"""]
return inputs
def _lowercase ( self: Union[str, Any], *_lowercase: Any, **_lowercase: Optional[Any]):
'''simple docstring'''
return self.tokenizer.batch_decode(*_lowercase, **_lowercase)
def _lowercase ( self: Tuple, *_lowercase: List[str], **_lowercase: Union[str, Any]):
'''simple docstring'''
return self.tokenizer.decode(*_lowercase, **_lowercase)
@contextmanager
def _lowercase ( self: Tuple):
'''simple docstring'''
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""")
__lowerCAmelCase = True
__lowerCAmelCase = self.tokenizer
yield
__lowerCAmelCase = self.feature_extractor
__lowerCAmelCase = False
| 334
|
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
__A : Any = data_utils.TransfoXLTokenizer
__A : Union[str, Any] = data_utils.TransfoXLCorpus
__A : Optional[int] = data_utils
__A : Optional[int] = data_utils
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Any:
'''simple docstring'''
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(UpperCamelCase__ , """rb""" ) as fp:
__lowerCAmelCase = pickle.load(UpperCamelCase__ , encoding="""latin1""" )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
__lowerCAmelCase = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""pretrained_vocab_file"""]
print(F'''Save vocabulary to {pytorch_vocab_dump_path}''' )
__lowerCAmelCase = corpus.vocab.__dict__
torch.save(UpperCamelCase__ , UpperCamelCase__ )
__lowerCAmelCase = corpus.__dict__
corpus_dict_no_vocab.pop("""vocab""" , UpperCamelCase__ )
__lowerCAmelCase = pytorch_dump_folder_path + """/""" + CORPUS_NAME
print(F'''Save dataset to {pytorch_dataset_dump_path}''' )
torch.save(UpperCamelCase__ , UpperCamelCase__ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
__lowerCAmelCase = os.path.abspath(UpperCamelCase__ )
__lowerCAmelCase = os.path.abspath(UpperCamelCase__ )
print(F'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
__lowerCAmelCase = TransfoXLConfig()
else:
__lowerCAmelCase = TransfoXLConfig.from_json_file(UpperCamelCase__ )
print(F'''Building PyTorch model from configuration: {config}''' )
__lowerCAmelCase = TransfoXLLMHeadModel(UpperCamelCase__ )
__lowerCAmelCase = load_tf_weights_in_transfo_xl(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save pytorch-model
__lowerCAmelCase = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
__lowerCAmelCase = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
print(F'''Save PyTorch model to {os.path.abspath(UpperCamelCase__ )}''' )
torch.save(model.state_dict() , UpperCamelCase__ )
print(F'''Save configuration file to {os.path.abspath(UpperCamelCase__ )}''' )
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__A : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
__A : Tuple = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 334
| 1
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class A_(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
a_ : str = IFInpaintingSuperResolutionPipeline
a_ : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
a_ : int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""} )
a_ : List[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
def _lowerCAmelCase ( self ):
return self._get_superresolution_dummy_components()
def _lowerCAmelCase ( self , A , A=0 ):
if str(A ).startswith('mps' ):
_lowerCamelCase : List[Any] = torch.manual_seed(A )
else:
_lowerCamelCase : Dict = torch.Generator(device=A ).manual_seed(A )
_lowerCamelCase : List[str] = floats_tensor((1, 3, 16, 16) , rng=random.Random(A ) ).to(A )
_lowerCamelCase : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A )
_lowerCamelCase : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A )
_lowerCamelCase : Optional[int] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowerCAmelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _lowerCAmelCase ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def _lowerCAmelCase ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _lowerCAmelCase ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _lowerCAmelCase ( self ):
self._test_save_load_local()
def _lowerCAmelCase ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 437
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
a_ = logging.get_logger(__name__)
class A_(SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self , *A , **A ):
warnings.warn(
'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use SegformerImageProcessor instead.' , A , )
super().__init__(*A , **A )
| 437
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : List[str] = {
"configuration_xmod": [
"XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XmodConfig",
"XmodOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[Any] = [
"XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
"XmodForCausalLM",
"XmodForMaskedLM",
"XmodForMultipleChoice",
"XmodForQuestionAnswering",
"XmodForSequenceClassification",
"XmodForTokenClassification",
"XmodModel",
"XmodPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 716
|
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCamelCase :
def __init__( self :List[Any] , lowerCamelCase :Optional[Any] , lowerCamelCase :Any=3 , lowerCamelCase :List[str]=32 , lowerCamelCase :List[str]=3 , lowerCamelCase :List[str]=10 , lowerCamelCase :List[Any]=[10, 20, 30, 40] , lowerCamelCase :Optional[Any]=[1, 1, 2, 1] , lowerCamelCase :List[str]=True , lowerCamelCase :List[Any]=True , lowerCamelCase :str="relu" , lowerCamelCase :Optional[Any]=3 , lowerCamelCase :List[str]=None , ) -> Union[str, Any]:
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = image_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = embeddings_size
UpperCAmelCase__ = hidden_sizes
UpperCAmelCase__ = depths
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = num_labels
UpperCAmelCase__ = scope
UpperCAmelCase__ = len(lowerCamelCase )
def UpperCAmelCase_ ( self :Union[str, Any] ) -> List[str]:
UpperCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase__ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self :Tuple ) -> Optional[int]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def UpperCAmelCase_ ( self :str , lowerCamelCase :Dict , lowerCamelCase :Optional[int] , lowerCamelCase :Union[str, Any] ) -> Dict:
UpperCAmelCase__ = RegNetModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
UpperCAmelCase__ = model(lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase_ ( self :Union[str, Any] , lowerCamelCase :Union[str, Any] , lowerCamelCase :Tuple , lowerCamelCase :List[str] ) -> Union[str, Any]:
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = RegNetForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
UpperCAmelCase__ = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self :Any ) -> Optional[Any]:
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
UpperCAmelCase_ = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
UpperCAmelCase_ = (
{"""feature-extraction""": RegNetModel, """image-classification""": RegNetForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
def UpperCAmelCase_ ( self :int ) -> Dict:
UpperCAmelCase__ = RegNetModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase )
def UpperCAmelCase_ ( self :str ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase_ ( self :Any ) -> List[str]:
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def UpperCAmelCase_ ( self :Optional[Any] ) -> Any:
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def UpperCAmelCase_ ( self :Tuple ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self :List[str] ) -> Tuple:
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(lowerCamelCase )
UpperCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ = [*signature.parameters.keys()]
UpperCAmelCase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def UpperCAmelCase_ ( self :Dict ) -> List[Any]:
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def UpperCAmelCase_ ( self :Optional[Any] ) -> int:
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(config=lowerCamelCase )
for name, module in model.named_modules():
if isinstance(lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def UpperCAmelCase_ ( self :Optional[int] ) -> List[Any]:
def check_hidden_states_output(lowerCamelCase :Optional[int] , lowerCamelCase :int , lowerCamelCase :Optional[int] ):
UpperCAmelCase__ = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
UpperCAmelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase__ = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase__ = layer_type
UpperCAmelCase__ = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def UpperCAmelCase_ ( self :Dict ) -> Union[str, Any]:
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@slow
def UpperCAmelCase_ ( self :Tuple ) -> Tuple:
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = RegNetModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def lowerCAmelCase ( ):
"""simple docstring"""
UpperCAmelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
@cached_property
def UpperCAmelCase_ ( self :Any ) -> Optional[Any]:
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self :Tuple ) -> Union[str, Any]:
UpperCAmelCase__ = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase )
UpperCAmelCase__ = self.default_image_processor
UpperCAmelCase__ = prepare_img()
UpperCAmelCase__ = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase__ = model(**lowerCamelCase )
# verify the logits
UpperCAmelCase__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
UpperCAmelCase__ = torch.tensor([-0.41_80, -1.50_51, -3.48_36] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1e-4 ) )
| 364
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_A : Any = logging.get_logger(__name__)
_A : int = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = """swin"""
lowerCamelCase__ : List[Any] = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , A_=2_24 , A_=4 , A_=3 , A_=96 , A_=[2, 2, 6, 2] , A_=[3, 6, 12, 24] , A_=7 , A_=4.0 , A_=True , A_=0.0 , A_=0.0 , A_=0.1 , A_="gelu" , A_=False , A_=0.02 , A_=1E-5 , A_=32 , A_=None , A_=None , **A_ , ):
'''simple docstring'''
super().__init__(**A_ )
SCREAMING_SNAKE_CASE__ = image_size
SCREAMING_SNAKE_CASE__ = patch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = embed_dim
SCREAMING_SNAKE_CASE__ = depths
SCREAMING_SNAKE_CASE__ = len(A_ )
SCREAMING_SNAKE_CASE__ = num_heads
SCREAMING_SNAKE_CASE__ = window_size
SCREAMING_SNAKE_CASE__ = mlp_ratio
SCREAMING_SNAKE_CASE__ = qkv_bias
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = drop_path_rate
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = use_absolute_embeddings
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE__ = int(embed_dim * 2 ** (len(A_ ) - 1) )
SCREAMING_SNAKE_CASE__ = ['''stem'''] + [f'''stage{idx}''' for idx in range(1 , len(A_ ) + 1 )]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_aligned_output_features_output_indices(
out_features=A_ , out_indices=A_ , stage_names=self.stage_names )
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ : int = version.parse("""1.11""" )
@property
def lowercase_ ( self ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowercase_ ( self ):
'''simple docstring'''
return 1E-4
| 100
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __lowerCamelCase :
"""simple docstring"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple=13 , SCREAMING_SNAKE_CASE__ : Optional[Any]=10 , SCREAMING_SNAKE_CASE__ : Optional[int]=3 , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=2 , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Any=32 , SCREAMING_SNAKE_CASE__ : Optional[int]=5 , SCREAMING_SNAKE_CASE__ : List[Any]=4 , SCREAMING_SNAKE_CASE__ : List[Any]=37 , SCREAMING_SNAKE_CASE__ : int="gelu" , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Any=10 , SCREAMING_SNAKE_CASE__ : int=0.02 , SCREAMING_SNAKE_CASE__ : Tuple="divided_space_time" , SCREAMING_SNAKE_CASE__ : Optional[int]=None , ) -> List[str]:
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = image_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = num_frames
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = attention_type
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = scope
lowerCAmelCase__ = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
lowerCAmelCase__ = (image_size // patch_size) ** 2
lowerCAmelCase__ = (num_frames) * self.num_patches_per_frame + 1
def a ( self : int ) -> Tuple:
lowerCAmelCase__ = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase__ = self.get_config()
return config, pixel_values, labels
def a ( self : List[Any] ) -> Any:
lowerCAmelCase__ = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
lowerCAmelCase__ = self.num_labels
return config
def a ( self : str , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple:
lowerCAmelCase__ = TimesformerModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple ) -> Tuple:
lowerCAmelCase__ = TimesformerForVideoClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
# verify the logits shape
lowerCAmelCase__ = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , SCREAMING_SNAKE_CASE__ )
def a ( self : Tuple ) -> Dict:
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
snake_case__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
snake_case__ = (
{"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
def a ( self : List[str] ) -> List[Any]:
lowerCAmelCase__ = TimesformerModelTester(self )
lowerCAmelCase__ = ConfigTester(
self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def a ( self : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple=False ) -> str:
lowerCAmelCase__ = copy.deepcopy(SCREAMING_SNAKE_CASE__ )
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ )
return inputs_dict
def a ( self : Optional[Any] ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason="TimeSformer does not use inputs_embeds" )
def a ( self : Union[str, Any] ) -> Tuple:
pass
def a ( self : Dict ) -> List[str]:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ , nn.Linear ) )
def a ( self : int ) -> Optional[Any]:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def a ( self : int ) -> Optional[Any]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def a ( self : Optional[Any] ) -> Tuple:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*SCREAMING_SNAKE_CASE__ )
@slow
def a ( self : str ) -> Tuple:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = TimesformerModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def a ( self : int ) -> Dict:
if not self.has_attentions:
pass
else:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = True
for model_class in self.all_model_classes:
lowerCAmelCase__ = self.model_tester.seq_length
lowerCAmelCase__ = self.model_tester.num_frames
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase__ = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase__ = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
lowerCAmelCase__ = len(SCREAMING_SNAKE_CASE__ )
# Check attention is always last and order is fine
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(out_len + 1 , len(SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase__ = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def a ( self : List[str] ) -> Any:
def check_hidden_states_output(SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] ):
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase__ = outputs.hidden_states
lowerCAmelCase__ = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _A ( ):
"""simple docstring"""
lowerCAmelCase__ = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
lowerCAmelCase__ = np.load(lowerCAmelCase_ )
return list(lowerCAmelCase_ )
@require_torch
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def a ( self : Optional[Any] ) -> Union[str, Any]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def a ( self : Optional[Any] ) -> str:
lowerCAmelCase__ = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to(
SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_video()
lowerCAmelCase__ = image_processor(video[:8] , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ = model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
lowerCAmelCase__ = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = torch.tensor([-0.3_016, -0.7_713, -0.4_205] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
| 61
| 0
|
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
_UpperCAmelCase = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class __magic_name__ ( nn.Module ):
"""simple docstring"""
def __init__( self , a__ ):
super().__init__()
_lowerCamelCase = torchvision.models.resnetaaa(pretrained=a__ )
_lowerCamelCase = list(model.children() )[:-2]
_lowerCamelCase = nn.Sequential(*a__ )
_lowerCamelCase = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def _UpperCAmelCase ( self , a__ ):
# Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
_lowerCamelCase = self.pool(self.model(a__ ) )
_lowerCamelCase = torch.flatten(a__ , start_dim=2 )
_lowerCamelCase = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class __magic_name__ ( lowercase_ ):
"""simple docstring"""
def __init__( self , a__ , a__ , a__ , a__ , a__ ):
_lowerCamelCase = [json.loads(a__ ) for l in open(a__ )]
_lowerCamelCase = os.path.dirname(a__ )
_lowerCamelCase = tokenizer
_lowerCamelCase = labels
_lowerCamelCase = len(a__ )
_lowerCamelCase = max_seq_length
_lowerCamelCase = transforms
def __len__( self ):
return len(self.data )
def __getitem__( self , a__ ):
_lowerCamelCase = torch.LongTensor(self.tokenizer.encode(self.data[index]['''text'''] , add_special_tokens=a__ ) )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = sentence[0], sentence[1:-1], sentence[-1]
_lowerCamelCase = sentence[: self.max_seq_length]
_lowerCamelCase = torch.zeros(self.n_classes )
_lowerCamelCase = 1
_lowerCamelCase = Image.open(os.path.join(self.data_dir , self.data[index]['''img'''] ) ).convert('''RGB''' )
_lowerCamelCase = self.transforms(a__ )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def _UpperCAmelCase ( self ):
_lowerCamelCase = Counter()
for row in self.data:
label_freqs.update(row['''label'''] )
return label_freqs
def _lowerCamelCase ( _a ):
"""simple docstring"""
_lowerCamelCase = [len(row['''sentence'''] ) for row in batch]
_lowerCamelCase , _lowerCamelCase = len(_a ), max(_a )
_lowerCamelCase = torch.zeros(_a , _a , dtype=torch.long )
_lowerCamelCase = torch.zeros(_a , _a , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(_a , _a ) ):
_lowerCamelCase = input_row['''sentence''']
_lowerCamelCase = 1
_lowerCamelCase = torch.stack([row['''image'''] for row in batch] )
_lowerCamelCase = torch.stack([row['''label'''] for row in batch] )
_lowerCamelCase = torch.stack([row['''image_start_token'''] for row in batch] )
_lowerCamelCase = torch.stack([row['''image_end_token'''] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def _lowerCamelCase ( ):
"""simple docstring"""
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def _lowerCamelCase ( ):
"""simple docstring"""
return transforms.Compose(
[
transforms.Resize(2_5_6 ),
transforms.CenterCrop(2_2_4 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , ),
] )
| 297
|
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def _lowerCamelCase ( _a ):
"""simple docstring"""
_lowerCamelCase = FileLock(str(tmpdir / '''foo.lock''' ) )
_lowerCamelCase = FileLock(str(tmpdir / '''foo.lock''' ) )
_lowerCamelCase = 0.01
with locka.acquire():
with pytest.raises(_a ):
_lowerCamelCase = time.time()
locka.acquire(_a )
assert time.time() - _start > timeout
def _lowerCamelCase ( _a ):
"""simple docstring"""
_lowerCamelCase = '''a''' * 1_0_0_0 + '''.lock'''
_lowerCamelCase = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith('''.lock''' )
assert not locka._lock_file.endswith(_a )
assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5
_lowerCamelCase = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(_a ):
locka.acquire(0 )
| 297
| 1
|
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __lowercase ( __lowercase ):
"""simple docstring"""
UpperCamelCase : int = "dandelin/vilt-b32-finetuned-vqa"
UpperCamelCase : Union[str, Any] = (
"This is a tool that answers a question about an image. It takes an input named `image` which should be the "
"image containing the information, as well as a `question` which should be the question in English. It "
"returns a text that is the answer to the question."
)
UpperCamelCase : Optional[int] = "image_qa"
UpperCamelCase : Dict = AutoProcessor
UpperCamelCase : Optional[int] = AutoModelForVisualQuestionAnswering
UpperCamelCase : int = ["image", "text"]
UpperCamelCase : Optional[Any] = ["text"]
def __init__( self , *A , **A ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""vision"""] )
super().__init__(*A , **A )
def __A ( self , A , A ) -> int:
'''simple docstring'''
return self.pre_processor(A , A , return_tensors="""pt""" )
def __A ( self , A ) -> Any:
'''simple docstring'''
with torch.no_grad():
return self.model(**A ).logits
def __A ( self , A ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
| 457
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__)
def __UpperCAmelCase ( snake_case_ : Union[str, Any] , snake_case_ : str ) -> Union[str, Any]:
"""simple docstring"""
return (preds == labels).mean()
@dataclass
class __lowerCamelCase :
__UpperCamelCase = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__UpperCamelCase = field(
default=__lowercase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__UpperCamelCase = field(
default=__lowercase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__UpperCamelCase = field(
default=__lowercase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class __lowerCamelCase :
__UpperCamelCase = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} )
__UpperCamelCase = field(metadata={'help': 'Should contain the data files for the task.'} )
__UpperCamelCase = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__UpperCamelCase = field(
default=__lowercase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
_lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , snake_case_ )
# Set seed
set_seed(training_args.seed )
try:
_lowerCAmelCase = processors[data_args.task_name]()
_lowerCAmelCase = processor.get_labels()
_lowerCAmelCase = len(snake_case_ )
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=snake_case_ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
_lowerCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCAmelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=snake_case_ , cache_dir=model_args.cache_dir , )
# Get datasets
_lowerCAmelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=snake_case_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
_lowerCAmelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=snake_case_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(snake_case_ : EvalPrediction ) -> Dict:
_lowerCAmelCase = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(snake_case_ , p.label_ids )}
# Data collator
_lowerCAmelCase = DataCollatorWithPadding(snake_case_ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
_lowerCAmelCase = Trainer(
model=snake_case_ , args=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , compute_metrics=snake_case_ , data_collator=snake_case_ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_lowerCAmelCase = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_lowerCAmelCase = trainer.evaluate()
_lowerCAmelCase = os.path.join(training_args.output_dir , """eval_results.txt""" )
if trainer.is_world_master():
with open(snake_case_ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" , snake_case_ , snake_case_ )
writer.write("""%s = %s\n""" % (key, value) )
results.update(snake_case_ )
return results
def __UpperCAmelCase ( snake_case_ : Optional[Any] ) -> Dict:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 156
| 0
|
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
lowercase = logging.getLogger(__name__)
def __lowerCAmelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[Any] ) -> Any:
# save results
if os.path.exists(UpperCAmelCase__ ):
if os.path.exists(os.path.join(UpperCAmelCase__ , """config.json""" ) ) and os.path.isfile(
os.path.join(UpperCAmelCase__ , """config.json""" ) ):
os.remove(os.path.join(UpperCAmelCase__ , """config.json""" ) )
if os.path.exists(os.path.join(UpperCAmelCase__ , """pytorch_model.bin""" ) ) and os.path.isfile(
os.path.join(UpperCAmelCase__ , """pytorch_model.bin""" ) ):
os.remove(os.path.join(UpperCAmelCase__ , """pytorch_model.bin""" ) )
else:
os.makedirs(UpperCAmelCase__ )
model.save_pretrained(UpperCAmelCase__ )
def __lowerCAmelCase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any]=False ) -> int:
lowerCamelCase_ = 2
if unlogit:
lowerCamelCase_ = torch.pow(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCamelCase_ = p * torch.log(UpperCAmelCase__ )
lowerCamelCase_ = 0
return -plogp.sum(dim=-1 )
def __lowerCAmelCase ( UpperCAmelCase__ : Union[str, Any] ) -> Optional[int]:
logger.info("""lv, h >\t""" + """\t""".join(F'''{x + 1}''' for x in range(len(UpperCAmelCase__ ) ) ) )
for row in range(len(UpperCAmelCase__ ) ):
if tensor.dtype != torch.long:
logger.info(F'''layer {row + 1}:\t''' + """\t""".join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(F'''layer {row + 1}:\t''' + """\t""".join(F'''{x:d}''' for x in tensor[row].cpu().data ) )
def __lowerCAmelCase ( UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int=True , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Optional[int]=False ) -> int:
lowerCamelCase_ , lowerCamelCase_ = model.config.num_hidden_layers, model.config.num_attention_heads
lowerCamelCase_ = torch.zeros(UpperCAmelCase__ , UpperCAmelCase__ ).to(args.device )
lowerCamelCase_ = torch.zeros(UpperCAmelCase__ , UpperCAmelCase__ ).to(args.device )
if head_mask is None:
lowerCamelCase_ = torch.ones(UpperCAmelCase__ , UpperCAmelCase__ ).to(args.device )
head_mask.requires_grad_(requires_grad=UpperCAmelCase__ )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
lowerCamelCase_ = None
lowerCamelCase_ = 0.0
lowerCamelCase_ = 0.0
for step, inputs in enumerate(tqdm(UpperCAmelCase__ , desc="""Iteration""" , disable=args.local_rank not in [-1, 0] ) ):
lowerCamelCase_ = tuple(t.to(args.device ) for t in inputs )
((lowerCamelCase_) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
lowerCamelCase_ = model(UpperCAmelCase__ , labels=UpperCAmelCase__ , head_mask=UpperCAmelCase__ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(UpperCAmelCase__ ):
lowerCamelCase_ = entropy(attn.detach() , UpperCAmelCase__ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(UpperCAmelCase__ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
lowerCamelCase_ = 2
lowerCamelCase_ = torch.pow(torch.pow(UpperCAmelCase__ , UpperCAmelCase__ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
lowerCamelCase_ = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("""Attention entropies""" )
print_ad_tensor(UpperCAmelCase__ )
if compute_importance:
logger.info("""Head importance scores""" )
print_ad_tensor(UpperCAmelCase__ )
logger.info("""Head ranked by importance scores""" )
lowerCamelCase_ = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
lowerCamelCase_ = torch.arange(
head_importance.numel() , device=args.device )
lowerCamelCase_ = head_ranks.view_as(UpperCAmelCase__ )
print_ad_tensor(UpperCAmelCase__ )
return attn_entropy, head_importance, total_loss
def __lowerCAmelCase ( UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict ) -> Optional[int]:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = compute_heads_importance(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , compute_entropy=UpperCAmelCase__ )
lowerCamelCase_ = 1 / loss # instead of downsteam score use the LM loss
logger.info("""Pruning: original score: %f, threshold: %f""" , UpperCAmelCase__ , original_score * args.masking_threshold )
lowerCamelCase_ = torch.ones_like(UpperCAmelCase__ )
lowerCamelCase_ = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
lowerCamelCase_ = original_score
while current_score >= original_score * args.masking_threshold:
lowerCamelCase_ = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
lowerCamelCase_ = float("""Inf""" )
lowerCamelCase_ = head_importance.view(-1 ).sort()[1]
if len(UpperCAmelCase__ ) <= num_to_mask:
print("""BREAK BY num_to_mask""" )
break
# mask heads
lowerCamelCase_ = current_heads_to_mask[:num_to_mask]
logger.info("""Heads to mask: %s""" , str(current_heads_to_mask.tolist() ) )
lowerCamelCase_ = new_head_mask.view(-1 )
lowerCamelCase_ = 0.0
lowerCamelCase_ = new_head_mask.view_as(UpperCAmelCase__ )
lowerCamelCase_ = new_head_mask.clone().detach()
print_ad_tensor(UpperCAmelCase__ )
# Compute metric and head importance again
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = compute_heads_importance(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , compute_entropy=UpperCAmelCase__ , head_mask=UpperCAmelCase__ )
lowerCamelCase_ = 1 / loss
logger.info(
"""Masking: current score: %f, remaining heads %d (%.1f percents)""" , UpperCAmelCase__ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_0_0 , )
logger.info("""Final head mask""" )
print_ad_tensor(UpperCAmelCase__ )
np.save(os.path.join(args.output_dir , """head_mask.npy""" ) , head_mask.detach().cpu().numpy() )
return head_mask
def __lowerCAmelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] ) -> int:
lowerCamelCase_ = datetime.now()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = compute_heads_importance(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , compute_entropy=UpperCAmelCase__ , compute_importance=UpperCAmelCase__ , head_mask=UpperCAmelCase__ )
lowerCamelCase_ = 1 / loss
lowerCamelCase_ = datetime.now() - before_time
lowerCamelCase_ = sum(p.numel() for p in model.parameters() )
lowerCamelCase_ = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(UpperCAmelCase__ ) )
}
for k, v in heads_to_prune.items():
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCamelCase_ = [
v,
]
assert sum(len(UpperCAmelCase__ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(UpperCAmelCase__ )
lowerCamelCase_ = sum(p.numel() for p in model.parameters() )
lowerCamelCase_ = datetime.now()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = compute_heads_importance(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , compute_entropy=UpperCAmelCase__ , compute_importance=UpperCAmelCase__ , head_mask=UpperCAmelCase__ , actually_pruned=UpperCAmelCase__ , )
lowerCamelCase_ = 1 / loss
lowerCamelCase_ = datetime.now() - before_time
logger.info(
"""Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)""" , UpperCAmelCase__ , UpperCAmelCase__ , pruned_num_params / original_num_params * 1_0_0 , )
logger.info("""Pruning: score with masking: %f score with pruning: %f""" , UpperCAmelCase__ , UpperCAmelCase__ )
logger.info("""Pruning: speed ratio (original timing / new timing): %f percents""" , original_time / new_time * 1_0_0 )
save_model(UpperCAmelCase__ , args.output_dir )
def __lowerCAmelCase ( ) -> Optional[Any]:
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--data_dir""" , default=UpperCAmelCase__ , type=UpperCAmelCase__ , required=UpperCAmelCase__ , help="""The input data dir. Should contain the .tsv files (or other data files) for the task.""" , )
parser.add_argument(
"""--model_name_or_path""" , default=UpperCAmelCase__ , type=UpperCAmelCase__ , required=UpperCAmelCase__ , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--output_dir""" , default=UpperCAmelCase__ , type=UpperCAmelCase__ , required=UpperCAmelCase__ , help="""The output directory where the model predictions and checkpoints will be written.""" , )
# Other parameters
parser.add_argument(
"""--config_name""" , default="""""" , type=UpperCAmelCase__ , help="""Pretrained config name or path if not the same as model_name_or_path""" , )
parser.add_argument(
"""--tokenizer_name""" , default="""""" , type=UpperCAmelCase__ , help="""Pretrained tokenizer name or path if not the same as model_name_or_path""" , )
parser.add_argument(
"""--cache_dir""" , default=UpperCAmelCase__ , type=UpperCAmelCase__ , help="""Where do you want to store the pre-trained models downloaded from s3""" , )
parser.add_argument(
"""--data_subset""" , type=UpperCAmelCase__ , default=-1 , help="""If > 0: limit the data to a subset of data_subset instances.""" )
parser.add_argument(
"""--overwrite_output_dir""" , action="""store_true""" , help="""Whether to overwrite data in output directory""" )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
parser.add_argument(
"""--dont_normalize_importance_by_layer""" , action="""store_true""" , help="""Don't normalize importance score by layers""" )
parser.add_argument(
"""--dont_normalize_global_importance""" , action="""store_true""" , help="""Don't normalize all importance scores between 0 and 1""" , )
parser.add_argument(
"""--try_masking""" , action="""store_true""" , help="""Whether to try to mask head until a threshold of accuracy.""" )
parser.add_argument(
"""--masking_threshold""" , default=0.9 , type=UpperCAmelCase__ , help="""masking threshold in term of metrics (stop masking when metric < threshold * original metric value).""" , )
parser.add_argument(
"""--masking_amount""" , default=0.1 , type=UpperCAmelCase__ , help="""Amount to heads to masking at each masking step.""" )
parser.add_argument("""--metric_name""" , default="""acc""" , type=UpperCAmelCase__ , help="""Metric to use for head masking.""" )
parser.add_argument(
"""--max_seq_length""" , default=1_2_8 , type=UpperCAmelCase__ , help=(
"""The maximum total input sequence length after WordPiece tokenization. \n"""
"""Sequences longer than this will be truncated, sequences shorter padded."""
) , )
parser.add_argument("""--batch_size""" , default=1 , type=UpperCAmelCase__ , help="""Batch size.""" )
parser.add_argument("""--seed""" , type=UpperCAmelCase__ , default=4_2 )
parser.add_argument("""--local_rank""" , type=UpperCAmelCase__ , default=-1 , help="""local_rank for distributed training on gpus""" )
parser.add_argument("""--no_cuda""" , action="""store_true""" , help="""Whether not to use CUDA when available""" )
parser.add_argument("""--server_ip""" , type=UpperCAmelCase__ , default="""""" , help="""Can be used for distant debugging.""" )
parser.add_argument("""--server_port""" , type=UpperCAmelCase__ , default="""""" , help="""Can be used for distant debugging.""" )
lowerCamelCase_ = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=UpperCAmelCase__ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
lowerCamelCase_ = torch.device("""cuda""" if torch.cuda.is_available() and not args.no_cuda else """cpu""" )
lowerCamelCase_ = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
lowerCamelCase_ = torch.device("""cuda""" , args.local_rank )
lowerCamelCase_ = 1
torch.distributed.init_process_group(backend="""nccl""" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("""device: {} n_gpu: {}, distributed: {}""".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
lowerCamelCase_ = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
lowerCamelCase_ = nn.parallel.DistributedDataParallel(
UpperCAmelCase__ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=UpperCAmelCase__ )
elif args.n_gpu > 1:
lowerCamelCase_ = nn.DataParallel(UpperCAmelCase__ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=UpperCAmelCase__ )
torch.save(UpperCAmelCase__ , os.path.join(args.output_dir , """run_args.bin""" ) )
logger.info("""Training/evaluation parameters %s""" , UpperCAmelCase__ )
# Prepare dataset
lowerCamelCase_ = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
lowerCamelCase_ = (torch.from_numpy(UpperCAmelCase__ ),)
lowerCamelCase_ = TensorDataset(*UpperCAmelCase__ )
lowerCamelCase_ = RandomSampler(UpperCAmelCase__ )
lowerCamelCase_ = DataLoader(UpperCAmelCase__ , sampler=UpperCAmelCase__ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
lowerCamelCase_ = mask_heads(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
prune_heads(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
if __name__ == "__main__":
main()
| 103
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {'''vocab_file''': '''vocab.txt'''}
lowercase = {
'''vocab_file''': {
'''openbmb/cpm-ant-10b''': '''https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt''',
},
}
lowercase = {
'''openbmb/cpm-ant-10b''': 1_0_2_4,
}
def __lowerCAmelCase ( UpperCAmelCase__ : List[str] ) -> Dict:
lowerCamelCase_ = collections.OrderedDict()
with open(UpperCAmelCase__ , """r""" , encoding="""utf-8""" ) as reader:
lowerCamelCase_ = reader.readlines()
for index, token in enumerate(UpperCAmelCase__ ):
lowerCamelCase_ = token.rstrip("""\n""" )
lowerCamelCase_ = index
return vocab
class __A( UpperCAmelCase ):
def __init__( self : Optional[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : List[str]="<unk>" , __UpperCamelCase : List[str]=2_0_0 ):
lowerCamelCase_ = vocab
lowerCamelCase_ = unk_token
lowerCamelCase_ = max_input_chars_per_word
def lowercase__ ( self : int , __UpperCamelCase : int ):
lowerCamelCase_ = list(__UpperCamelCase )
if len(__UpperCamelCase ) > self.max_input_chars_per_word:
return [self.unk_token]
lowerCamelCase_ = 0
lowerCamelCase_ = []
while start < len(__UpperCamelCase ):
lowerCamelCase_ = len(__UpperCamelCase )
lowerCamelCase_ = None
while start < end:
lowerCamelCase_ = """""".join(chars[start:end] )
if substr in self.vocab:
lowerCamelCase_ = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(__UpperCamelCase )
lowerCamelCase_ = end
return sub_tokens
class __A( UpperCAmelCase ):
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = ['''input_ids''', '''attention_mask''']
SCREAMING_SNAKE_CASE = False
def __init__( self : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int]="<d>" , __UpperCamelCase : List[str]="</d>" , __UpperCamelCase : Optional[Any]="<s>" , __UpperCamelCase : Union[str, Any]="</s>" , __UpperCamelCase : List[str]="<pad>" , __UpperCamelCase : Union[str, Any]="<unk>" , __UpperCamelCase : List[Any]="</n>" , __UpperCamelCase : Tuple="</_>" , __UpperCamelCase : Optional[Any]="left" , **__UpperCamelCase : List[str] , ):
requires_backends(self , ["""jieba"""] )
super().__init__(
bod_token=__UpperCamelCase , eod_token=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , pad_token=__UpperCamelCase , unk_token=__UpperCamelCase , line_token=__UpperCamelCase , space_token=__UpperCamelCase , padding_side=__UpperCamelCase , **__UpperCamelCase , )
lowerCamelCase_ = bod_token
lowerCamelCase_ = eod_token
lowerCamelCase_ = load_vocab(__UpperCamelCase )
lowerCamelCase_ = self.encoder[space_token]
lowerCamelCase_ = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
lowerCamelCase_ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __UpperCamelCase : x[1] ) )
lowerCamelCase_ = {v: k for k, v in self.encoder.items()}
lowerCamelCase_ = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def lowercase__ ( self : Any ):
return self.encoder[self.bod_token]
@property
def lowercase__ ( self : Any ):
return self.encoder[self.eod_token]
@property
def lowercase__ ( self : int ):
return self.encoder["\n"]
@property
def lowercase__ ( self : str ):
return len(self.encoder )
def lowercase__ ( self : Any ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase__ ( self : Any , __UpperCamelCase : Optional[Any] ):
lowerCamelCase_ = []
for x in jieba.cut(__UpperCamelCase , cut_all=__UpperCamelCase ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(__UpperCamelCase ) )
return output_tokens
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Dict , **__UpperCamelCase : str ):
lowerCamelCase_ = [i for i in token_ids if i >= 0]
lowerCamelCase_ = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : Dict , __UpperCamelCase : Optional[Any] ):
return token in self.encoder
def lowercase__ ( self : int , __UpperCamelCase : List[str] ):
return "".join(__UpperCamelCase )
def lowercase__ ( self : List[str] , __UpperCamelCase : int ):
return self.encoder.get(__UpperCamelCase , self.encoder.get(self.unk_token ) )
def lowercase__ ( self : Any , __UpperCamelCase : Union[str, Any] ):
return self.decoder.get(__UpperCamelCase , self.unk_token )
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ):
if os.path.isdir(__UpperCamelCase ):
lowerCamelCase_ = os.path.join(
__UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
lowerCamelCase_ = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
lowerCamelCase_ = 0
if " " in self.encoder:
lowerCamelCase_ = self.encoder[""" """]
del self.encoder[" "]
if "\n" in self.encoder:
lowerCamelCase_ = self.encoder["""\n"""]
del self.encoder["\n"]
lowerCamelCase_ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __UpperCamelCase : x[1] ) )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
""" Please check that the vocabulary is not corrupted!""" )
lowerCamelCase_ = token_index
writer.write(token + """\n""" )
index += 1
return (vocab_file,)
def lowercase__ ( self : Any , __UpperCamelCase : List[int] , __UpperCamelCase : List[int] = None ):
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def lowercase__ ( self : int , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None , __UpperCamelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__UpperCamelCase )) + [1] + ([0] * len(__UpperCamelCase ))
return [1] + ([0] * len(__UpperCamelCase ))
| 103
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ : int = {
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = ['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = ['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Any = [
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
UpperCAmelCase_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 533
|
'''simple docstring'''
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
print("""\nThe shortest path matrix using Floyd Warshall algorithm\n""" )
for i in range(SCREAMING_SNAKE_CASE__ ):
for j in range(SCREAMING_SNAKE_CASE__ ):
if dist[i][j] != float("""inf""" ):
print(int(dist[i][j] ) , end="""\t""" )
else:
print("""INF""" , end="""\t""" )
print()
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = [[float("""inf""" ) for _ in range(SCREAMING_SNAKE_CASE__ )] for _ in range(SCREAMING_SNAKE_CASE__ )]
for i in range(SCREAMING_SNAKE_CASE__ ):
for j in range(SCREAMING_SNAKE_CASE__ ):
_SCREAMING_SNAKE_CASE : List[str] = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(SCREAMING_SNAKE_CASE__ ):
# looping through rows of graph array
for i in range(SCREAMING_SNAKE_CASE__ ):
# looping through columns of graph array
for j in range(SCREAMING_SNAKE_CASE__ ):
if (
dist[i][k] != float("""inf""" )
and dist[k][j] != float("""inf""" )
and dist[i][k] + dist[k][j] < dist[i][j]
):
_SCREAMING_SNAKE_CASE : int = dist[i][k] + dist[k][j]
_print_dist(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return dist, v
if __name__ == "__main__":
UpperCAmelCase_ : str = int(input('Enter number of vertices: '))
UpperCAmelCase_ : Union[str, Any] = int(input('Enter number of edges: '))
UpperCAmelCase_ : Dict = [[float('inf') for i in range(v)] for j in range(v)]
for i in range(v):
UpperCAmelCase_ : Union[str, Any] = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('\nEdge ', i + 1)
UpperCAmelCase_ : int = int(input('Enter source:'))
UpperCAmelCase_ : List[str] = int(input('Enter destination:'))
UpperCAmelCase_ : List[str] = float(input('Enter weight:'))
UpperCAmelCase_ : int = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 533
| 1
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
lowercase_ = logging.get_logger(__name__)
def _snake_case( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : str=None ) -> str:
'''simple docstring'''
if "." in tensor_name:
A__ = tensor_name.split('.' )
for split in splits[:-1]:
A__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if new_module is None:
raise ValueError(f'{module} has no attribute {split}.' )
A__ = new_module
A__ = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.' )
A__ = tensor_name in module._buffers
A__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None:
raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' )
A__ = False
A__ = False
if is_buffer or not is_bitsandbytes_available():
A__ = False
A__ = False
else:
A__ = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
A__ = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
A__ = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
A__ = old_value.to(SCREAMING_SNAKE_CASE__ )
elif isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
A__ = value.to('cpu' )
if value.dtype == torch.inta:
A__ = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse(
'0.37.2' )
if not is_abit_serializable:
raise ValueError(
'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' )
else:
A__ = torch.tensor(SCREAMING_SNAKE_CASE__ , device='cpu' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , SCREAMING_SNAKE_CASE__ ) and fpaa_statistics is None:
A__ = new_value.T
A__ = old_value.__dict__
if is_abit:
A__ = bnb.nn.IntaParams(SCREAMING_SNAKE_CASE__ , requires_grad=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
elif is_abit:
A__ = bnb.nn.Paramsabit(SCREAMING_SNAKE_CASE__ , requires_grad=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
A__ = new_value
if fpaa_statistics is not None:
setattr(module.weight , 'SCB' , fpaa_statistics.to(SCREAMING_SNAKE_CASE__ ) )
else:
if value is None:
A__ = old_value.to(SCREAMING_SNAKE_CASE__ )
elif isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
A__ = value.to(SCREAMING_SNAKE_CASE__ )
else:
A__ = torch.tensor(SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ )
if is_buffer:
A__ = new_value
else:
A__ = nn.Parameter(SCREAMING_SNAKE_CASE__ , requires_grad=old_value.requires_grad )
A__ = new_value
def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Optional[int]=False ) -> List[str]:
'''simple docstring'''
for name, module in model.named_children():
if current_key_name is None:
A__ = []
current_key_name.append(SCREAMING_SNAKE_CASE__ )
if (isinstance(SCREAMING_SNAKE_CASE__ , nn.Linear ) or isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '.'.join(SCREAMING_SNAKE_CASE__ ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A__ , A__ = module.weight.shape
else:
A__ = module.in_features
A__ = module.out_features
if quantization_config.quantization_method() == "llm_int8":
A__ = bnb.nn.LinearabitLt(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
A__ = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
A__ = bnb.nn.Linearabit(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
A__ = True
# Store the module class in case we need to transpose the weight later
A__ = type(SCREAMING_SNAKE_CASE__ )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(SCREAMING_SNAKE_CASE__ )
if len(list(module.children() ) ) > 0:
A__ , A__ = _replace_with_bnb_linear(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , has_been_replaced=SCREAMING_SNAKE_CASE__ , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _snake_case( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : str=None ) -> List[Any]:
'''simple docstring'''
A__ = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert
A__ , A__ = _replace_with_bnb_linear(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def _snake_case( *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[int]:
'''simple docstring'''
warnings.warn(
'`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , SCREAMING_SNAKE_CASE__ , )
return replace_with_bnb_linear(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _snake_case( *SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Dict:
'''simple docstring'''
warnings.warn(
'`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , SCREAMING_SNAKE_CASE__ , )
return set_module_quantized_tensor_to_device(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _snake_case( SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
A__ = deepcopy(SCREAMING_SNAKE_CASE__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
A__ = find_tied_parameters(SCREAMING_SNAKE_CASE__ )
# For compatibility with Accelerate < 0.18
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
A__ = sum(SCREAMING_SNAKE_CASE__ , [] )
A__ = len(SCREAMING_SNAKE_CASE__ ) > 0
# Check if it is a base model
A__ = not hasattr(SCREAMING_SNAKE_CASE__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
A__ = list(model.named_children() )
A__ = [list_modules[-1][0]]
# add last module together with tied weights
A__ = set(SCREAMING_SNAKE_CASE__ ) - set(SCREAMING_SNAKE_CASE__ )
A__ = list(set(SCREAMING_SNAKE_CASE__ ) ) + list(SCREAMING_SNAKE_CASE__ )
# remove ".weight" from the keys
A__ = ['.weight', '.bias']
A__ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
A__ = name.replace(SCREAMING_SNAKE_CASE__ , '' )
filtered_module_names.append(SCREAMING_SNAKE_CASE__ )
return filtered_module_names
| 701
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
lowercase_ = random.Random()
def _snake_case( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int]=1.0 , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : Tuple=None ) -> List[Any]:
'''simple docstring'''
if rng is None:
A__ = global_rng
A__ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class A ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple,lowercase_ : str,lowercase_ : Optional[Any]=7,lowercase_ : Union[str, Any]=4_0_0,lowercase_ : Optional[int]=2_0_0_0,lowercase_ : Dict=2_0_4_8,lowercase_ : int=1_2_8,lowercase_ : str=1,lowercase_ : List[Any]=5_1_2,lowercase_ : Union[str, Any]=3_0,lowercase_ : Any=4_4_1_0_0,)-> Union[str, Any]:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = min_seq_length
A__ = max_seq_length
A__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
A__ = spectrogram_length
A__ = feature_size
A__ = num_audio_channels
A__ = hop_length
A__ = chunk_length
A__ = sampling_rate
def snake_case__ ( self : Tuple )-> Dict:
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def snake_case__ ( self : Tuple,lowercase_ : List[Any]=False,lowercase_ : Optional[int]=False )-> str:
'''simple docstring'''
def _flatten(lowercase_ : Any ):
return list(itertools.chain(*lowercase_ ) )
if equal_length:
A__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
A__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length,self.max_seq_length,self.seq_length_diff )
]
if numpify:
A__ = [np.asarray(lowercase_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = TvltFeatureExtractor
def snake_case__ ( self : Optional[Any] )-> Dict:
'''simple docstring'''
A__ = TvltFeatureExtractionTester(self )
def snake_case__ ( self : List[Any] )-> Any:
'''simple docstring'''
A__ = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(lowercase_,'spectrogram_length' ) )
self.assertTrue(hasattr(lowercase_,'feature_size' ) )
self.assertTrue(hasattr(lowercase_,'num_audio_channels' ) )
self.assertTrue(hasattr(lowercase_,'hop_length' ) )
self.assertTrue(hasattr(lowercase_,'chunk_length' ) )
self.assertTrue(hasattr(lowercase_,'sampling_rate' ) )
def snake_case__ ( self : Dict )-> Any:
'''simple docstring'''
A__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = feat_extract_first.save_pretrained(lowercase_ )[0]
check_json_file_has_correct_format(lowercase_ )
A__ = self.feature_extraction_class.from_pretrained(lowercase_ )
A__ = feat_extract_first.to_dict()
A__ = feat_extract_second.to_dict()
A__ = dict_first.pop('mel_filters' )
A__ = dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(lowercase_,lowercase_ ) )
self.assertEqual(lowercase_,lowercase_ )
def snake_case__ ( self : str )-> Dict:
'''simple docstring'''
A__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = os.path.join(lowercase_,'feat_extract.json' )
feat_extract_first.to_json_file(lowercase_ )
A__ = self.feature_extraction_class.from_json_file(lowercase_ )
A__ = feat_extract_first.to_dict()
A__ = feat_extract_second.to_dict()
A__ = dict_first.pop('mel_filters' )
A__ = dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(lowercase_,lowercase_ ) )
self.assertEqual(lowercase_,lowercase_ )
def snake_case__ ( self : Optional[int] )-> Optional[int]:
'''simple docstring'''
A__ = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
A__ = [floats_list((1, x) )[0] for x in range(8_0_0,1_4_0_0,2_0_0 )]
A__ = [np.asarray(lowercase_ ) for speech_input in speech_inputs]
# Test not batched input
A__ = feature_extractor(np_speech_inputs[0],return_tensors='np',sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
A__ = feature_extractor(lowercase_,return_tensors='np',sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
A__ = feature_extractor(
lowercase_,return_tensors='np',sampling_rate=4_4_1_0_0,mask_audio=lowercase_ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
A__ = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
A__ = np.asarray(lowercase_ )
A__ = feature_extractor(lowercase_,return_tensors='np',sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def snake_case__ ( self : Optional[Any],lowercase_ : Tuple )-> Tuple:
'''simple docstring'''
A__ = load_dataset('hf-internal-testing/librispeech_asr_dummy','clean',split='validation' )
# automatic decoding with librispeech
A__ = ds.sort('id' ).select(range(lowercase_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def snake_case__ ( self : List[Any] )-> Any:
'''simple docstring'''
A__ = self._load_datasamples(1 )
A__ = TvltFeatureExtractor()
A__ = feature_extractor(lowercase_,return_tensors='pt' ).audio_values
self.assertEquals(audio_values.shape,(1, 1, 1_9_2, 1_2_8) )
A__ = torch.tensor([[-0.3_032, -0.2_708], [-0.4_434, -0.4_007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2],lowercase_,atol=1E-4 ) )
| 586
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_: Any = logging.get_logger(__name__)
A_: Optional[int] = {
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class _lowercase ( _UpperCAmelCase ):
"""simple docstring"""
lowerCAmelCase__ = 'transfo-xl'
lowerCAmelCase__ = ['mems']
lowerCAmelCase__ = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , UpperCAmelCase=267735 , UpperCAmelCase=[20000, 40000, 200000] , UpperCAmelCase=1024 , UpperCAmelCase=1024 , UpperCAmelCase=16 , UpperCAmelCase=64 , UpperCAmelCase=4096 , UpperCAmelCase=4 , UpperCAmelCase=False , UpperCAmelCase=18 , UpperCAmelCase=1600 , UpperCAmelCase=1000 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=0 , UpperCAmelCase=-1 , UpperCAmelCase=True , UpperCAmelCase=0.1 , UpperCAmelCase=0.0 , UpperCAmelCase=True , UpperCAmelCase="normal" , UpperCAmelCase=0.01 , UpperCAmelCase=0.01 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase=0 , **UpperCAmelCase , ):
'''simple docstring'''
_lowercase = vocab_size
_lowercase = []
self.cutoffs.extend(UpperCAmelCase )
if proj_share_all_but_first:
_lowercase = [False] + [True] * len(self.cutoffs )
else:
_lowercase = [False] + [False] * len(self.cutoffs )
_lowercase = d_model
_lowercase = d_embed
_lowercase = d_head
_lowercase = d_inner
_lowercase = div_val
_lowercase = pre_lnorm
_lowercase = n_layer
_lowercase = n_head
_lowercase = mem_len
_lowercase = same_length
_lowercase = attn_type
_lowercase = clamp_len
_lowercase = sample_softmax
_lowercase = adaptive
_lowercase = dropout
_lowercase = dropatt
_lowercase = untie_r
_lowercase = init
_lowercase = init_range
_lowercase = proj_init_std
_lowercase = init_std
_lowercase = layer_norm_epsilon
super().__init__(eos_token_id=UpperCAmelCase , **UpperCAmelCase )
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def _UpperCAmelCase ( self , UpperCAmelCase ):
'''simple docstring'''
raise NotImplementedError(
F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 398
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_: Tuple = logging.get_logger(__name__)
A_: str = {
'BridgeTower/bridgetower-base': 'https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json',
'BridgeTower/bridgetower-base-itm-mlm': (
'https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'
),
}
class _lowercase ( _UpperCAmelCase ):
"""simple docstring"""
lowerCAmelCase__ = 'bridgetower_vision_model'
def __init__( self , UpperCAmelCase=768 , UpperCAmelCase=12 , UpperCAmelCase=3 , UpperCAmelCase=16 , UpperCAmelCase=288 , UpperCAmelCase=1 , UpperCAmelCase=1e-05 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=False , **UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase )
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_channels
_lowercase = patch_size
_lowercase = image_size
_lowercase = initializer_factor
_lowercase = layer_norm_eps
_lowercase = stop_gradient
_lowercase = share_layernorm
_lowercase = remove_last_layer
@classmethod
def _UpperCAmelCase ( cls , UpperCAmelCase , **UpperCAmelCase ):
'''simple docstring'''
_lowercase , _lowercase = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase )
if config_dict.get("""model_type""" ) == "bridgetower":
_lowercase = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCAmelCase , **UpperCAmelCase )
class _lowercase ( _UpperCAmelCase ):
"""simple docstring"""
lowerCAmelCase__ = 'bridgetower_text_model'
def __init__( self , UpperCAmelCase=50265 , UpperCAmelCase=768 , UpperCAmelCase=12 , UpperCAmelCase=12 , UpperCAmelCase=1 , UpperCAmelCase=3072 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=514 , UpperCAmelCase=1 , UpperCAmelCase=1e-05 , UpperCAmelCase=1 , UpperCAmelCase=0 , UpperCAmelCase=2 , UpperCAmelCase="absolute" , UpperCAmelCase=True , **UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase )
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = hidden_act
_lowercase = initializer_factor
_lowercase = intermediate_size
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = max_position_embeddings
_lowercase = type_vocab_size
_lowercase = layer_norm_eps
_lowercase = position_embedding_type
_lowercase = use_cache
_lowercase = pad_token_id
_lowercase = bos_token_id
_lowercase = eos_token_id
@classmethod
def _UpperCAmelCase ( cls , UpperCAmelCase , **UpperCAmelCase ):
'''simple docstring'''
_lowercase , _lowercase = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase )
if config_dict.get("""model_type""" ) == "bridgetower":
_lowercase = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCAmelCase , **UpperCAmelCase )
class _lowercase ( _UpperCAmelCase ):
"""simple docstring"""
lowerCAmelCase__ = 'bridgetower'
def __init__( self , UpperCAmelCase=True , UpperCAmelCase="gelu" , UpperCAmelCase=768 , UpperCAmelCase=1 , UpperCAmelCase=1e-05 , UpperCAmelCase=False , UpperCAmelCase="add" , UpperCAmelCase=12 , UpperCAmelCase=6 , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase , ):
'''simple docstring'''
_lowercase = kwargs.pop("""text_config_dict""" , UpperCAmelCase )
_lowercase = kwargs.pop("""vision_config_dict""" , UpperCAmelCase )
super().__init__(**UpperCAmelCase )
_lowercase = share_cross_modal_transformer_layers
_lowercase = hidden_act
_lowercase = hidden_size
_lowercase = initializer_factor
_lowercase = layer_norm_eps
_lowercase = share_link_tower_layers
_lowercase = link_tower_type
_lowercase = num_attention_heads
_lowercase = num_hidden_layers
_lowercase = tie_word_embeddings
_lowercase = init_layernorm_from_vision_encoder
if text_config is None:
_lowercase = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
_lowercase = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
_lowercase = BridgeTowerTextConfig(**UpperCAmelCase )
_lowercase = BridgeTowerVisionConfig(**UpperCAmelCase )
@classmethod
def _UpperCAmelCase ( cls , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ):
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCAmelCase )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_lowercase = copy.deepcopy(self.__dict__ )
_lowercase = self.text_config.to_dict()
_lowercase = self.vision_config.to_dict()
_lowercase = self.__class__.model_type
return output
| 398
| 1
|
'''simple docstring'''
def __magic_name__( lowerCamelCase = 2_0_0_0_0_0_0):
__lowerCAmelCase = [0 for i in range(n + 1)]
__lowerCAmelCase = 1
__lowerCAmelCase = 1
for i in range(2, int(n**0.5) + 1):
if primality_list[i] == 0:
for j in range(i * i, n + 1, lowerCamelCase):
__lowerCAmelCase = 1
__lowerCAmelCase = 0
for i in range(lowerCamelCase):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f"""{solution() = }""")
| 711
|
'''simple docstring'''
def __magic_name__( ):
return [
a * b * (1_0_0_0 - a - b)
for a in range(1, 9_9_9)
for b in range(lowerCamelCase, 9_9_9)
if (a * a + b * b == (1_0_0_0 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 474
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : Any = {
'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
'processing_mgp_str': ['MgpstrProcessor'],
'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = [
'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
'MgpstrModel',
'MgpstrPreTrainedModel',
'MgpstrForSceneTextRecognition',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 394
|
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__A : Optional[Any] = logging.getLogger(__name__)
def lowerCAmelCase_ ( ):
a__ = argparse.ArgumentParser(
description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
parser.add_argument(
'--dataset_name' , type=a , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , )
parser.add_argument(
'--dataset_config' , type=a , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' )
parser.add_argument(
'--tokenizer_name_or_path' , type=a , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , )
parser.add_argument(
'--shard_size' , type=a , default=1000 , help='Number of entries to go in a single shard.' , )
parser.add_argument('--split' , type=a , default='train' , choices=['train', 'test', 'validation'] )
parser.add_argument(
'--limit' , default=a , type=a , help='Limit the number of shards (used for debugging).' , )
parser.add_argument(
'--max_length' , type=a , default=512 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
' sequence length that is a multiple of 8.' , )
parser.add_argument(
'--output_dir' , default='tf-tpu' , type=a , help='Output directory where the TFRecord shards will be saved. If the'
' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
' shards will be directly saved to a Google Cloud Storage bucket.' , )
a__ = parser.parse_args()
return args
def lowerCAmelCase_ ( a : int ):
def fn(a : int ):
return tokenizer(examples['text'] )
return fn
def lowerCAmelCase_ ( a : str ):
a__ = []
for i in range(len(tokenized_data['input_ids'] ) ):
a__ = {
'input_ids': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['input_ids'][i] ) ),
'attention_mask': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['attention_mask'][i] ) ),
}
a__ = tf.train.Features(feature=a )
a__ = tf.train.Example(features=a )
a__ = example.SerializeToString()
records.append(a )
return records
def lowerCAmelCase_ ( a : Any ):
a__ = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
a__ = min(len(a ) , args.limit )
a__ = dataset.select(range(a ) )
print(f'''Limiting the dataset to {args.limit} entries.''' )
a__ = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
a__ = os.path.join(args.output_dir , args.split )
if not os.path.exists(a ):
os.makedirs(a )
else:
a__ = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
a__ = tokenize_function(a )
a__ = dataset.map(a , batched=a , num_proc=4 , remove_columns=['text'] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(a : Optional[Any] ):
# Concatenate all texts.
a__ = {k: sum(examples[k] , [] ) for k in examples.keys()}
a__ = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
a__ = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
a__ = {
k: [t[i : i + args.max_length] for i in range(0 , a , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
a__ = dataset_tokenized.map(a , batched=a , batch_size=1000 , num_proc=4 )
a__ = 0
a__ = 0
for shard in range(0 , len(a ) , args.shard_size ):
a__ = grouped_dataset[shard : shard + args.shard_size]
a__ = len(dataset_snapshot['input_ids'] )
a__ = os.path.join(a , f'''dataset-{shard_count}-{records_containing}.tfrecord''' )
a__ = get_serialized_examples(a )
with tf.io.TFRecordWriter(a ) as out_file:
for i in range(len(a ) ):
a__ = serialized_examples[i]
out_file.write(a )
print('Wrote file {} containing {} records'.format(a , a ) )
shard_count += 1
total_records += records_containing
with open(f'''split-{args.split}-records-count.txt''' , 'w' ) as f:
print(f'''Total {args.split} records: {total_records}''' , file=a )
if __name__ == "__main__":
__A : str = parse_args()
main(args)
| 394
| 1
|
'''simple docstring'''
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self , __A , __A , __A ) -> int:
self.assertEqual(len(_lowercase ) , len(_lowercase ) )
for a, b in zip(_lowercase , _lowercase ):
self.assertAlmostEqual(_lowercase , _lowercase , delta=_lowercase )
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(_lowercase ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =None
ops.enable_eager_execution_internal()
_lowerCAmelCase =tf.config.list_physical_devices('CPU' )
if len(_lowercase ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
_lowerCAmelCase =tf.config.list_logical_devices(device_type='CPU' )
_lowerCAmelCase =tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
_lowerCAmelCase =GradientAccumulator()
_lowerCAmelCase =tf.Variable([4.0, 3.0] )
_lowerCAmelCase , _lowerCAmelCase =create_optimizer(5E-5 , 10 , 5 )
_lowerCAmelCase =tf.Variable([0.0, 0.0] , trainable=_lowercase )
def accumulate_on_replica(__A ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(__A , __A ):
with strategy.scope():
_lowerCAmelCase =strategy.experimental_local_results(_lowercase )
local_variables[0].assign(_lowercase )
local_variables[1].assign(_lowercase )
strategy.run(_lowercase , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(_lowercase )
def _check_local_values(__A , __A ):
_lowerCAmelCase =strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , _lowercase , tol=1E-2 )
self.assertListAlmostEqual(values[1].value() , _lowercase , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 702
|
'''simple docstring'''
from __future__ import annotations
lowercase_ = 10
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =1
_lowerCAmelCase =max(a__ )
while placement <= max_digit:
# declare and initialize empty buckets
_lowerCAmelCase =[[] for _ in range(a__ )]
# split list_of_ints between the buckets
for i in list_of_ints:
_lowerCAmelCase =int((i / placement) % RADIX )
buckets[tmp].append(a__ )
# put each buckets' contents into list_of_ints
_lowerCAmelCase =0
for b in range(a__ ):
for i in buckets[b]:
_lowerCAmelCase =i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
| 0
|
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase_ ( __snake_case : int , __snake_case : List[str] , __snake_case : Dict , __snake_case : Tuple , __snake_case : List[str] ) -> str:
'''simple docstring'''
snake_case__ :Dict = TapasConfig.from_json_file(__snake_case )
# set absolute/relative position embeddings parameter
snake_case__ :Any = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
snake_case__ :int = TapasForQuestionAnswering(config=__snake_case )
elif task == "WTQ":
# run_task_main.py hparams
snake_case__ :Dict = 4
snake_case__ :int = True
# hparam_utils.py hparams
snake_case__ :Any = 0.6_6_4_6_9_4
snake_case__ :str = 0.2_0_7_9_5_1
snake_case__ :Optional[Any] = 0.1_2_1_1_9_4
snake_case__ :Dict = True
snake_case__ :Union[str, Any] = True
snake_case__ :Dict = False
snake_case__ :Optional[int] = 0.0_3_5_2_5_1_3
snake_case__ :Any = TapasForQuestionAnswering(config=__snake_case )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
snake_case__ :List[str] = 4
snake_case__ :List[str] = False
# hparam_utils.py hparams
snake_case__ :Union[str, Any] = 3_6.4_5_1_9
snake_case__ :Optional[Any] = 0.9_0_3_4_2_1
snake_case__ :List[Any] = 2_2_2.0_8_8
snake_case__ :Dict = True
snake_case__ :str = True
snake_case__ :Any = True
snake_case__ :List[Any] = 0.7_6_3_1_4_1
snake_case__ :Union[str, Any] = TapasForQuestionAnswering(config=__snake_case )
elif task == "TABFACT":
snake_case__ :Optional[int] = TapasForSequenceClassification(config=__snake_case )
elif task == "MLM":
snake_case__ :Any = TapasForMaskedLM(config=__snake_case )
elif task == "INTERMEDIATE_PRETRAINING":
snake_case__ :Optional[int] = TapasModel(config=__snake_case )
else:
raise ValueError(F'Task {task} not supported.' )
print(F'Building PyTorch model from configuration: {config}' )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(__snake_case , __snake_case , __snake_case )
# Save pytorch-model (weights and configuration)
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(__snake_case )
# Save tokenizer files
print(F'Save tokenizer files to {pytorch_dump_path}' )
snake_case__ :Optional[int] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=5_12 )
tokenizer.save_pretrained(__snake_case )
print("Used relative position embeddings:" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
__UpperCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__UpperCAmelCase : List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 241
|
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
def lowerCamelCase_ ( self: Dict ) -> Tuple:
"""simple docstring"""
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
lowercase__ = '''xvjiarui/stable-diffusion-2-inpainting'''
lowercase__ , lowercase__ = FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase_ , safety_checker=UpperCamelCase_ )
lowercase__ = '''Face of a yellow cat, high resolution, sitting on a park bench'''
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 50
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = num_samples * [init_image]
lowercase__ = num_samples * [mask_image]
lowercase__ , lowercase__ , lowercase__ = pipeline.prepare_inputs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# shard inputs and rng
lowercase__ = replicate(UpperCamelCase_ )
lowercase__ = jax.random.split(UpperCamelCase_ , jax.device_count() )
lowercase__ = shard(UpperCamelCase_ )
lowercase__ = shard(UpperCamelCase_ )
lowercase__ = shard(UpperCamelCase_ )
lowercase__ = pipeline(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , jit=UpperCamelCase_ )
lowercase__ = output.images.reshape(UpperCamelCase_ , 512 , 512 , 3 )
lowercase__ = images[0, 253:256, 253:256, -1]
lowercase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ = jnp.array(
[0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 43
| 0
|
from itertools import product
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = sides_number
UpperCAmelCase_ : List[str] = max_face_number * dice_number
UpperCAmelCase_ : Optional[Any] = [0] * (max_total + 1)
UpperCAmelCase_ : List[str] = 1
UpperCAmelCase_ : Union[str, Any] = range(UpperCamelCase__ , max_face_number + 1 )
for dice_numbers in product(UpperCamelCase__ , repeat=UpperCamelCase__ ):
UpperCAmelCase_ : Dict = sum(UpperCamelCase__ )
totals_frequencies[total] += 1
return totals_frequencies
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Any = total_frequency_distribution(
sides_number=4 , dice_number=9 )
UpperCAmelCase_ : Optional[int] = total_frequency_distribution(
sides_number=6 , dice_number=6 )
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : Optional[Any] = 9
UpperCAmelCase_ : Union[str, Any] = 4 * 9
UpperCAmelCase_ : Optional[int] = 6
for peter_total in range(UpperCamelCase__ , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
UpperCAmelCase_ : str = (4**9) * (6**6)
UpperCAmelCase_ : List[Any] = peter_wins_count / total_games_number
UpperCAmelCase_ : List[str] = round(UpperCamelCase__ , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F"""{solution() = }""")
| 703
|
from math import ceil
def lowerCamelCase__ ( _lowercase = 1001 ):
'''simple docstring'''
UpperCAmelCase_ : Dict = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
UpperCAmelCase_ : Tuple = 2 * i + 1
UpperCAmelCase_ : List[str] = 2 * i
UpperCAmelCase_ : List[str] = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
__a = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number')
| 300
| 0
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase__: Tuple = logging.get_logger(__name__)
lowerCAmelCase__: Union[str, Any] = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class snake_case_ ( lowerCAmelCase ):
__lowerCamelCase : List[Any] = 'van'
def __init__( self , __lowerCAmelCase=224 , __lowerCAmelCase=3 , __lowerCAmelCase=[7, 3, 3, 3] , __lowerCAmelCase=[4, 2, 2, 2] , __lowerCAmelCase=[64, 128, 320, 512] , __lowerCAmelCase=[3, 3, 12, 3] , __lowerCAmelCase=[8, 8, 4, 4] , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.02 , __lowerCAmelCase=1e-6 , __lowerCAmelCase=1e-2 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , **__lowerCAmelCase , ):
super().__init__(**__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE_ : int = patch_sizes
SCREAMING_SNAKE_CASE_ : Union[str, Any] = strides
SCREAMING_SNAKE_CASE_ : List[str] = hidden_sizes
SCREAMING_SNAKE_CASE_ : Optional[Any] = depths
SCREAMING_SNAKE_CASE_ : int = mlp_ratios
SCREAMING_SNAKE_CASE_ : str = hidden_act
SCREAMING_SNAKE_CASE_ : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE_ : Any = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Optional[int] = layer_scale_init_value
SCREAMING_SNAKE_CASE_ : Optional[Any] = drop_path_rate
SCREAMING_SNAKE_CASE_ : Any = dropout_rate
| 345
|
import warnings
from functools import wraps
from typing import Callable
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> Callable:
@wraps(SCREAMING_SNAKE_CASE )
def _inner_fn(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
warnings.warn(
(f'\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.') , SCREAMING_SNAKE_CASE , )
return fn(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
return _inner_fn
| 345
| 1
|
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _a ( A__ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_snake_case , "tf_padding" ) )
self.parent.assertTrue(hasattr(_snake_case , "depth_multiplier" ) )
class _a :
"""simple docstring"""
def __init__( self , _snake_case , _snake_case=13 , _snake_case=3 , _snake_case=32 , _snake_case=0.25 , _snake_case=8 , _snake_case=8 , _snake_case=6 , _snake_case=32 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case="relu6" , _snake_case=1280 , _snake_case=0.1 , _snake_case=0.02 , _snake_case=True , _snake_case=True , _snake_case=10 , _snake_case=None , ):
_UpperCAmelCase =parent
_UpperCAmelCase =batch_size
_UpperCAmelCase =num_channels
_UpperCAmelCase =image_size
_UpperCAmelCase =depth_multiplier
_UpperCAmelCase =depth_divisible_by
_UpperCAmelCase =min_depth
_UpperCAmelCase =expand_ratio
_UpperCAmelCase =tf_padding
_UpperCAmelCase =output_stride
_UpperCAmelCase =first_layer_is_expansion
_UpperCAmelCase =finegrained_output
_UpperCAmelCase =hidden_act
_UpperCAmelCase =last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
_UpperCAmelCase =classifier_dropout_prob
_UpperCAmelCase =use_labels
_UpperCAmelCase =is_training
_UpperCAmelCase =num_labels
_UpperCAmelCase =initializer_range
_UpperCAmelCase =scope
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase =None
_UpperCAmelCase =None
if self.use_labels:
_UpperCAmelCase =ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_UpperCAmelCase =self.get_config()
return config, pixel_values, labels, pixel_labels
def SCREAMING_SNAKE_CASE ( self ):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case , _snake_case , _snake_case ):
_UpperCAmelCase =MobileNetVaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
_UpperCAmelCase =model(_snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case , _snake_case , _snake_case ):
_UpperCAmelCase =self.num_labels
_UpperCAmelCase =MobileNetVaForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
_UpperCAmelCase =model(_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case , _snake_case , _snake_case ):
_UpperCAmelCase =self.num_labels
_UpperCAmelCase =MobileNetVaForSemanticSegmentation(_snake_case )
model.to(_snake_case )
model.eval()
_UpperCAmelCase =model(_snake_case )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_UpperCAmelCase =model(_snake_case , labels=_snake_case )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase =config_and_inputs
_UpperCAmelCase ={"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _a ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
snake_case =(
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
snake_case =(
{
"""feature-extraction""": MobileNetVaModel,
"""image-classification""": MobileNetVaForImageClassification,
"""image-segmentation""": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
snake_case =False
snake_case =False
snake_case =False
snake_case =False
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =MobileNetVaModelTester(self )
_UpperCAmelCase =MobileNetVaConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case )
def SCREAMING_SNAKE_CASE ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV2 does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE ( self ):
pass
@unittest.skip(reason="MobileNetV2 does not support input and output embeddings" )
def SCREAMING_SNAKE_CASE ( self ):
pass
@unittest.skip(reason="MobileNetV2 does not output attentions" )
def SCREAMING_SNAKE_CASE ( self ):
pass
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase , _UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase =model_class(_snake_case )
_UpperCAmelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase =[*signature.parameters.keys()]
_UpperCAmelCase =["pixel_values"]
self.assertListEqual(arg_names[:1] , _snake_case )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def SCREAMING_SNAKE_CASE ( self ):
def check_hidden_states_output(_snake_case , _snake_case , _snake_case ):
_UpperCAmelCase =model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
_UpperCAmelCase =model(**self._prepare_for_class(_snake_case , _snake_case ) )
_UpperCAmelCase =outputs.hidden_states
_UpperCAmelCase =16
self.assertEqual(len(_snake_case ) , _snake_case )
_UpperCAmelCase , _UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase =True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase =True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_snake_case )
@slow
def SCREAMING_SNAKE_CASE ( self ):
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase =MobileNetVaModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def lowerCamelCase__ ( ) ->List[str]:
_UpperCAmelCase =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE ( self ):
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224" ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224" ).to(_snake_case )
_UpperCAmelCase =self.default_image_processor
_UpperCAmelCase =prepare_img()
_UpperCAmelCase =image_processor(images=_snake_case , return_tensors="pt" ).to(_snake_case )
# forward pass
with torch.no_grad():
_UpperCAmelCase =model(**_snake_case )
# verify the logits
_UpperCAmelCase =torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , _snake_case )
_UpperCAmelCase =torch.tensor([0.2_445, -1.1_993, 0.1_905] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" )
_UpperCAmelCase =model.to(_snake_case )
_UpperCAmelCase =MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" )
_UpperCAmelCase =prepare_img()
_UpperCAmelCase =image_processor(images=_snake_case , return_tensors="pt" ).to(_snake_case )
# forward pass
with torch.no_grad():
_UpperCAmelCase =model(**_snake_case )
_UpperCAmelCase =outputs.logits
# verify the logits
_UpperCAmelCase =torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , _snake_case )
_UpperCAmelCase =torch.tensor(
[
[[17.5_790, 17.7_581, 18.3_355], [18.3_257, 18.4_230, 18.8_973], [18.6_169, 18.8_650, 19.2_187]],
[[-2.1_595, -2.0_977, -2.3_741], [-2.4_226, -2.3_028, -2.6_835], [-2.7_819, -2.5_991, -2.7_706]],
[[4.2_058, 4.8_317, 4.7_638], [4.4_136, 5.0_361, 4.9_383], [4.5_028, 4.9_644, 4.8_734]],
] , device=_snake_case , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _snake_case , atol=1E-4 ) )
| 592
|
from __future__ import annotations
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ) ->bool:
_UpperCAmelCase =get_failure_array(_lowerCamelCase )
# 2) Step through text searching for pattern
_UpperCAmelCase , _UpperCAmelCase =0, 0 # index into text, pattern
while i < len(_lowerCamelCase ):
if pattern[j] == text[i]:
if j == (len(_lowerCamelCase ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
_UpperCAmelCase =failure[j - 1]
continue
i += 1
return False
def lowerCamelCase__ ( _lowerCamelCase ) ->list[int]:
_UpperCAmelCase =[0]
_UpperCAmelCase =0
_UpperCAmelCase =1
while j < len(_lowerCamelCase ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
_UpperCAmelCase =failure[i - 1]
continue
j += 1
failure.append(_lowerCamelCase )
return failure
if __name__ == "__main__":
# Test 1)
snake_case__ : Dict = 'abc1abc12'
snake_case__ : Tuple = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
snake_case__ : int = 'alskfjaldsk23adsfabcabc'
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
snake_case__ : Tuple = 'ABABX'
snake_case__ : Any = 'ABABZABABYABABX'
assert kmp(pattern, text)
# Test 3)
snake_case__ : Optional[int] = 'AAAB'
snake_case__ : Optional[Any] = 'ABAAAAAB'
assert kmp(pattern, text)
# Test 4)
snake_case__ : int = 'abcdabcy'
snake_case__ : int = 'abcxabcdabxabcdabcdabcy'
assert kmp(pattern, text)
# Test 5)
snake_case__ : str = 'aabaabaaa'
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 592
| 1
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class __UpperCamelCase :
def __init__( self , _UpperCamelCase , _UpperCamelCase=2 , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=10 , _UpperCamelCase=3 , _UpperCamelCase=32 * 4 , _UpperCamelCase=32 * 6 , _UpperCamelCase=4 , _UpperCamelCase=32 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = is_training
_UpperCAmelCase = use_auxiliary_loss
_UpperCAmelCase = num_queries
_UpperCAmelCase = num_channels
_UpperCAmelCase = min_size
_UpperCAmelCase = max_size
_UpperCAmelCase = num_labels
_UpperCAmelCase = mask_feature_size
def UpperCamelCase( self ):
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_UpperCamelCase )
_UpperCAmelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_UpperCamelCase )
_UpperCAmelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_UpperCamelCase ) > 0.5
).float()
_UpperCAmelCase = (torch.rand((self.batch_size, self.num_labels) , device=_UpperCamelCase ) > 0.5).long()
_UpperCAmelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCamelCase( self ):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = output.encoder_hidden_states
_UpperCAmelCase = output.pixel_decoder_hidden_states
_UpperCAmelCase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_UpperCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_UpperCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_UpperCamelCase ) , config.decoder_config.decoder_layers )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ):
with torch.no_grad():
_UpperCAmelCase = MaskFormerModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(pixel_values=_UpperCamelCase , pixel_mask=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , output_hidden_states=_UpperCamelCase )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_UpperCamelCase , _UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = MaskFormerForInstanceSegmentation(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
def comm_check_on_output(_UpperCamelCase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_UpperCAmelCase = model(pixel_values=_UpperCamelCase , pixel_mask=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase )
comm_check_on_output(_UpperCamelCase )
_UpperCAmelCase = model(
pixel_values=_UpperCamelCase , pixel_mask=_UpperCamelCase , mask_labels=_UpperCamelCase , class_labels=_UpperCamelCase )
comm_check_on_output(_UpperCamelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __UpperCamelCase ( A__ , A__ , unittest.TestCase ):
__A : int = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
__A : int = (
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
__A : List[str] = False
__A : Optional[Any] = False
__A : Union[str, Any] = False
__A : Optional[Any] = False
def UpperCamelCase( self ):
_UpperCAmelCase = MaskFormerModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase )
def UpperCamelCase( self ):
self.config_tester.run_common_tests()
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_UpperCamelCase , **_UpperCamelCase , output_hidden_states=_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_UpperCamelCase )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def UpperCamelCase( self ):
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def UpperCamelCase( self ):
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def UpperCamelCase( self ):
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def UpperCamelCase( self ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def UpperCamelCase( self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCamelCase( self ):
pass
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_UpperCamelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
@slow
def UpperCamelCase( self ):
for model_name in ["facebook/maskformer-swin-small-coco"]:
_UpperCAmelCase = MaskFormerModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = (self.model_tester.min_size,) * 2
_UpperCAmelCase = {
'''pixel_values''': torch.randn((2, 3, *size) , device=_UpperCamelCase ),
'''mask_labels''': torch.randn((2, 10, *size) , device=_UpperCamelCase ),
'''class_labels''': torch.zeros(2 , 10 , device=_UpperCamelCase ).long(),
}
_UpperCAmelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_UpperCamelCase )
_UpperCAmelCase = model(**_UpperCamelCase )
self.assertTrue(outputs.loss is not None )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_UpperCamelCase , **_UpperCamelCase , output_hidden_states=_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_UpperCamelCase ).to(_UpperCamelCase )
_UpperCAmelCase = model(**_UpperCamelCase , output_attentions=_UpperCamelCase )
self.assertTrue(outputs.attentions is not None )
def UpperCamelCase( self ):
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_UpperCAmelCase = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.train()
_UpperCAmelCase = model(_UpperCamelCase , mask_labels=_UpperCamelCase , class_labels=_UpperCamelCase ).loss
loss.backward()
def UpperCamelCase( self ):
# only MaskFormerForInstanceSegmentation has the loss
_UpperCAmelCase = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.train()
_UpperCAmelCase = model(_UpperCamelCase , mask_labels=_UpperCamelCase , class_labels=_UpperCamelCase )
_UpperCAmelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_UpperCAmelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
_UpperCAmelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_UpperCAmelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_UpperCamelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
UpperCAmelCase_ = 1e-4
def A__ ( ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def UpperCamelCase( self ):
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def UpperCamelCase( self ):
_UpperCAmelCase = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(_UpperCamelCase )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(_UpperCamelCase , return_tensors='''pt''' ).to(_UpperCamelCase )
_UpperCAmelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCamelCase , (1, 3, 800, 1088) )
with torch.no_grad():
_UpperCAmelCase = model(**_UpperCamelCase )
_UpperCAmelCase = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(_UpperCamelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
_UpperCAmelCase = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(_UpperCamelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
_UpperCAmelCase = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(_UpperCamelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
def UpperCamelCase( self ):
_UpperCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(_UpperCamelCase )
.eval()
)
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(_UpperCamelCase , return_tensors='''pt''' ).to(_UpperCamelCase )
_UpperCAmelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCamelCase , (1, 3, 800, 1088) )
with torch.no_grad():
_UpperCAmelCase = model(**_UpperCamelCase )
# masks_queries_logits
_UpperCAmelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_UpperCAmelCase = [
[-1.3737124, -1.7724937, -1.9364233],
[-1.5977281, -1.9867939, -2.1523695],
[-1.5795398, -1.9269832, -2.093942],
]
_UpperCAmelCase = torch.tensor(_UpperCamelCase ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
# class_queries_logits
_UpperCAmelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase = torch.tensor(
[
[1.6512e00, -5.2572e00, -3.3519e00],
[3.6169e-02, -5.9025e00, -2.9313e00],
[1.0766e-04, -7.7630e00, -5.1263e00],
] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
def UpperCamelCase( self ):
_UpperCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(_UpperCamelCase )
.eval()
)
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(_UpperCamelCase , return_tensors='''pt''' ).to(_UpperCamelCase )
_UpperCAmelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCamelCase , (1, 3, 800, 1088) )
with torch.no_grad():
_UpperCAmelCase = model(**_UpperCamelCase )
# masks_queries_logits
_UpperCAmelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_UpperCAmelCase = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
_UpperCAmelCase = torch.tensor(_UpperCamelCase ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
# class_queries_logits
_UpperCAmelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
def UpperCamelCase( self ):
_UpperCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(_UpperCamelCase )
.eval()
)
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , )
_UpperCAmelCase = inputs['''pixel_values'''].to(_UpperCamelCase )
_UpperCAmelCase = [el.to(_UpperCamelCase ) for el in inputs['''mask_labels''']]
_UpperCAmelCase = [el.to(_UpperCamelCase ) for el in inputs['''class_labels''']]
with torch.no_grad():
_UpperCAmelCase = model(**_UpperCamelCase )
self.assertTrue(outputs.loss is not None )
| 32
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : str = {
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __lowerCAmelCase ( UpperCamelCase__):
_lowercase : Any = """trocr"""
_lowercase : Optional[Any] = ["""past_key_values"""]
_lowercase : List[str] = {
"""num_attention_heads""": """decoder_attention_heads""",
"""hidden_size""": """d_model""",
"""num_hidden_layers""": """decoder_layers""",
}
def __init__( self , lowerCAmelCase__=5_0_2_6_5 , lowerCAmelCase__=1_0_2_4 , lowerCAmelCase__=1_2 , lowerCAmelCase__=1_6 , lowerCAmelCase__=4_0_9_6 , lowerCAmelCase__="gelu" , lowerCAmelCase__=5_1_2 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=0.0 , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=2 , **lowerCAmelCase__ , ) -> Any:
'''simple docstring'''
a__ : List[Any] =vocab_size
a__ : str =d_model
a__ : str =decoder_layers
a__ : Union[str, Any] =decoder_attention_heads
a__ : str =decoder_ffn_dim
a__ : List[str] =activation_function
a__ : List[Any] =max_position_embeddings
a__ : int =dropout
a__ : List[Any] =attention_dropout
a__ : Any =activation_dropout
a__ : Union[str, Any] =init_std
a__ : List[Any] =decoder_layerdrop
a__ : int =use_cache
a__ : str =scale_embedding
a__ : Tuple =use_learned_position_embeddings
a__ : Union[str, Any] =layernorm_embedding
super().__init__(
pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , decoder_start_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
| 563
| 0
|
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_lowerCamelCase = {
'vocab_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
},
'merges_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
},
}
_lowerCamelCase = {'allegro/herbert-base-cased': 514}
_lowerCamelCase = {}
class lowerCamelCase_ ( lowercase ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES
_lowerCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase : Optional[int] = PRETRAINED_INIT_CONFIGURATION
_lowerCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase : Any = HerbertTokenizer
def __init__( self , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__="<s>" , UpperCAmelCase__="<unk>" , UpperCAmelCase__="<pad>" , UpperCAmelCase__="<mask>" , UpperCAmelCase__="</s>" , **UpperCAmelCase__ , ):
super().__init__(
UpperCAmelCase__ , UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , **UpperCAmelCase__ , )
def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ):
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase__ )) + [1]
return [1] + ([0] * len(UpperCAmelCase__ )) + [1] + ([0] * len(UpperCAmelCase__ )) + [1]
def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ):
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ):
SCREAMING_SNAKE_CASE__ = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
| 112
|
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('--user', type=str, default='ubuntu')
parser.add_argument('--host', type=str, default='localhost')
parser.add_argument('--key_path', type=str, default=None)
parser.add_argument('--instance', type=str, default='V100:1')
parser.add_argument('--provider', type=str, default='cheapest')
parser.add_argument('--use_spot', type=bool, default=False)
parser.add_argument('--example', type=str, default='pytorch/text-generation/run_generation.py')
_lowerCamelCase , _lowerCamelCase = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('Cannot specify both BYO and on-demand cluster args')
_lowerCamelCase = rh.cluster(
name='rh-cluster', ips=[args.host], ssh_creds={'ssh_user': args.user, 'ssh_private_key': args.key_path}
)
else:
_lowerCamelCase = rh.cluster(
name='rh-cluster', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
_lowerCamelCase = args.example.rsplit('/', 1)[0]
# Set up remote environment
cluster.install_packages(['pip:./']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f"""pip install -r transformers/examples/{example_dir}/requirements.txt"""])
cluster.run(['pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f"""python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"""])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 112
| 1
|
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
a__ = '''Usage of script: script_name <size_of_canvas:int>'''
a__ = [0] * 100 + [1] * 10
random.shuffle(choice)
def __UpperCAmelCase ( __a : int ) -> list[list[bool]]:
"""simple docstring"""
_a : int = [[False for i in range(__a )] for j in range(__a )]
return canvas
def __UpperCAmelCase ( __a : list[list[bool]] ) -> None:
"""simple docstring"""
for i, row in enumerate(__a ):
for j, _ in enumerate(__a ):
_a : Optional[int] = bool(random.getrandbits(1 ) )
def __UpperCAmelCase ( __a : list[list[bool]] ) -> list[list[bool]]:
"""simple docstring"""
_a : Any = np.array(__a )
_a : Optional[int] = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(__a ):
for c, pt in enumerate(__a ):
_a : Tuple = __judge_point(
__a ,current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
_a : List[str] = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
_a : list[list[bool]] = current_canvas.tolist()
return return_canvas
def __UpperCAmelCase ( __a : bool ,__a : list[list[bool]] ) -> bool:
"""simple docstring"""
_a : Optional[Any] = 0
_a : str = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
_a : Optional[int] = pt
if pt:
if alive < 2:
_a : Dict = False
elif alive == 2 or alive == 3:
_a : Optional[Any] = True
elif alive > 3:
_a : str = False
else:
if alive == 3:
_a : int = True
return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
a__ = int(sys.argv[1])
# main working structure of this module.
a__ = create_canvas(canvas_size)
seed(c)
a__ , a__ = plt.subplots()
fig.show()
a__ = ListedColormap(['''w''', '''k'''])
try:
while True:
a__ = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 14
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : int = "mobilenet_v1"
def __init__( self , _a=3 , _a=2_2_4 , _a=1.0 , _a=8 , _a="relu6" , _a=True , _a=0.999 , _a=0.02 , _a=0.001 , **_a , ) -> List[Any]:
super().__init__(**_a )
if depth_multiplier <= 0:
raise ValueError('''depth_multiplier must be greater than zero.''' )
_a : Tuple = num_channels
_a : str = image_size
_a : Tuple = depth_multiplier
_a : Any = min_depth
_a : int = hidden_act
_a : Optional[Any] = tf_padding
_a : str = classifier_dropout_prob
_a : Optional[int] = initializer_range
_a : Any = layer_norm_eps
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : str = version.parse("1.11" )
@property
def __lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([('''pixel_values''', {0: '''batch'''})] )
@property
def __lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})] )
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )
@property
def __lowercase ( self ) -> float:
return 1e-4
| 14
| 1
|
"""simple docstring"""
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = '''T5Config'''
class _snake_case ( __lowerCamelCase ):
"""simple docstring"""
a = "mt5"
a = MTaConfig
class _snake_case ( __lowerCamelCase ):
"""simple docstring"""
a = "mt5"
a = MTaConfig
class _snake_case ( __lowerCamelCase ):
"""simple docstring"""
a = "mt5"
a = MTaConfig
| 720
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_50, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_00, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Optional[int]):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=_A , )
assert hasattr(self , """env""")
def _lowerCAmelCase ( self : Union[str, Any] , _A : str=1):
"""simple docstring"""
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="""py36""" , )
def _lowerCAmelCase ( self : Union[str, Any] , _A : Union[str, Any]):
"""simple docstring"""
TrainingJobAnalytics(_A).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""")
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = self.create_estimator()
# run training
estimator.fit()
# result dataframe
_SCREAMING_SNAKE_CASE : Any = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
_SCREAMING_SNAKE_CASE : Any = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""])
_SCREAMING_SNAKE_CASE : Tuple = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_SCREAMING_SNAKE_CASE : int = (
Session().describe_training_job(estimator.latest_training_job.name).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy)
assert all(t <= self.results["""eval_loss"""] for t in eval_loss)
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , """w""") as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , _A)
| 635
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
_lowerCamelCase : List[Any] = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : int , UpperCamelCase__ : str , UpperCamelCase__ : List[str]=7 , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : Optional[Any]=1_8 , UpperCamelCase__ : List[Any]=3_0 , UpperCamelCase__ : List[Any]=4_0_0 , UpperCamelCase__ : int=None , UpperCamelCase__ : int=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : str=None , ):
"""simple docstring"""
UpperCamelCase = size if size is not None else {'height': 2_0, 'width': 2_0}
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = num_channels
UpperCamelCase = image_size
UpperCamelCase = min_resolution
UpperCamelCase = max_resolution
UpperCamelCase = size
UpperCamelCase = do_normalize
UpperCamelCase = do_convert_rgb
UpperCamelCase = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6]
UpperCamelCase = patch_size if patch_size is not None else {'height': 1_6, 'width': 1_6}
def A ( self : Any ):
"""simple docstring"""
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'
UpperCamelCase = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ).convert('RGB' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = PixaStructImageProcessor if is_vision_available() else None
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = PixaStructImageProcessingTester(self )
@property
def A ( self : List[str] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , 'do_normalize' ) )
self.assertTrue(hasattr(lowercase_ , 'do_convert_rgb' ) )
def A ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = self.image_processor_tester.prepare_dummy_image()
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
UpperCamelCase = 2_0_4_8
UpperCamelCase = image_processor(lowercase_ , return_tensors='pt' , max_patches=lowercase_ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0_6_0_6 ) , atol=1E-3 , rtol=1E-3 ) )
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
UpperCamelCase = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=lowercase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase = image_processor(
lowercase_ , return_tensors='pt' , max_patches=lowercase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
UpperCamelCase = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
UpperCamelCase = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(lowercase_ ):
UpperCamelCase = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=lowercase_ ).flattened_patches
UpperCamelCase = 'Hello'
UpperCamelCase = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=lowercase_ , header_text=lowercase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase = image_processor(
lowercase_ , return_tensors='pt' , max_patches=lowercase_ , header_text=lowercase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
UpperCamelCase = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=lowercase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase = image_processor(
lowercase_ , return_tensors='pt' , max_patches=lowercase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
UpperCamelCase = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=lowercase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase = image_processor(
lowercase_ , return_tensors='pt' , max_patches=lowercase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = PixaStructImageProcessor if is_vision_available() else None
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = PixaStructImageProcessingTester(self , num_channels=4 )
UpperCamelCase = 3
@property
def A ( self : Optional[int] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , 'do_normalize' ) )
self.assertTrue(hasattr(lowercase_ , 'do_convert_rgb' ) )
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
UpperCamelCase = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=lowercase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase = image_processor(
lowercase_ , return_tensors='pt' , max_patches=lowercase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 430
|
from manim import *
class lowercase_ (lowercase__ ):
def __UpperCamelCase ( self) -> List[Any]:
a__ =Rectangle(height=0.5 , width=0.5)
a__ =Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
a__ =[mem.copy() for i in range(6)]
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =VGroup(lowercase_ , lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('CPU' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
cpu.move_to([-2.5, -0.5, 0])
self.add(lowercase_)
a__ =[mem.copy() for i in range(4)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('GPU' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
gpu.move_to([-1, -1, 0])
self.add(lowercase_)
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('Model' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
model.move_to([3, -1.0, 0])
self.add(lowercase_)
a__ =[]
for i, rect in enumerate(lowercase_):
rect.set_stroke(lowercase_)
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
a__ =Rectangle(height=0.46 / 4 , width=0.46 / 3).set_stroke(width=0.0).set_fill(lowercase_ , opacity=0.7)
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.02 , direction=lowercase_)
cpu_target.set_x(cpu_target.get_x() + 0.1)
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0)
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0)
self.add(lowercase_)
cpu_targs.append(lowercase_)
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('Loaded Checkpoint' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4)
checkpoint.move_to([3, 0.5, 0])
a__ =Square(side_length=2.2)
key.move_to([-5, 2, 0])
a__ =MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
self.add(lowercase_ , lowercase_)
a__ =MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left())
a__ =MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0])
self.play(Write(lowercase_) , Write(lowercase_))
self.play(Write(lowercase_ , run_time=1) , Create(lowercase_ , run_time=1))
a__ =[]
a__ =[]
for i, rect in enumerate(lowercase_):
a__ =fill.copy().set_fill(lowercase_ , opacity=0.7)
target.move_to(lowercase_)
first_animations.append(GrowFromCenter(lowercase_ , run_time=1))
a__ =target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1])
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5])
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5))
self.play(*lowercase_)
self.play(*lowercase_)
self.wait()
| 20
| 0
|
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : List[str] = os.path.abspath(snake_case_ )
logger.info(f'''Converting TensorFlow checkpoint from {tf_path}''' )
# Load weights from TF model
_A : List[str] = tf.train.list_variables(snake_case_ )
_A : List[Any] = []
_A : Optional[Any] = []
_A : Union[str, Any] = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
_A : Tuple = full_name.split("""/""" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f'''Skipping non-model layer {full_name}''' )
continue
if "optimizer" in full_name:
logger.info(f'''Skipping optimization layer {full_name}''' )
continue
if name[0] == "model":
# ignore initial 'model'
_A : int = name[1:]
# figure out how many levels deep the name is
_A : Any = 0
for _name in name:
if _name.startswith("""layer_with_weights""" ):
depth += 1
else:
break
layer_depth.append(snake_case_ )
# read data
_A : List[str] = tf.train.load_variable(snake_case_,snake_case_ )
names.append("""/""".join(snake_case_ ) )
arrays.append(snake_case_ )
logger.info(f'''Read a total of {len(snake_case_ ):,} layers''' )
# Sanity check
if len(set(snake_case_ ) ) != 1:
raise ValueError(f'''Found layer names with different depths (layer depth {list(set(snake_case_ ) )})''' )
_A : List[str] = list(set(snake_case_ ) )[0]
if layer_depth != 1:
raise ValueError(
"""The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
""" heads.""" )
# convert layers
logger.info("""Converting weights...""" )
for full_name, array in zip(snake_case_,snake_case_ ):
_A : Optional[Any] = full_name.split("""/""" )
_A : Any = model
_A : Any = []
for i, m_name in enumerate(snake_case_ ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("""layer_with_weights""" ):
_A : Optional[Any] = int(m_name.split("""-""" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["""embeddings""", """LayerNorm"""] )
_A : int = getattr(snake_case_,"""embeddings""" )
_A : Dict = getattr(snake_case_,"""LayerNorm""" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] )
_A : Any = getattr(snake_case_,"""encoder""" )
_A : Tuple = getattr(snake_case_,"""layer""" )
_A : str = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["""pooler""", """dense"""] )
_A : str = getattr(snake_case_,"""pooler""" )
_A : Union[str, Any] = getattr(snake_case_,"""dense""" )
elif m_name == "embeddings":
trace.append("""embeddings""" )
_A : List[str] = getattr(snake_case_,"""embeddings""" )
if layer_num == 0:
trace.append("""word_embeddings""" )
_A : Optional[Any] = getattr(snake_case_,"""word_embeddings""" )
elif layer_num == 1:
trace.append("""position_embeddings""" )
_A : Optional[Any] = getattr(snake_case_,"""position_embeddings""" )
elif layer_num == 2:
trace.append("""token_type_embeddings""" )
_A : Optional[Any] = getattr(snake_case_,"""token_type_embeddings""" )
else:
raise ValueError(f'''Unknown embedding layer with name {full_name}''' )
trace.append("""weight""" )
_A : Any = getattr(snake_case_,"""weight""" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["""attention""", """self"""] )
_A : Optional[int] = getattr(snake_case_,"""attention""" )
_A : List[Any] = getattr(snake_case_,"""self""" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["""attention""", """output""", """LayerNorm"""] )
_A : Optional[int] = getattr(snake_case_,"""attention""" )
_A : Tuple = getattr(snake_case_,"""output""" )
_A : int = getattr(snake_case_,"""LayerNorm""" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["""attention""", """output""", """dense"""] )
_A : Optional[Any] = getattr(snake_case_,"""attention""" )
_A : Any = getattr(snake_case_,"""output""" )
_A : Optional[int] = getattr(snake_case_,"""dense""" )
elif m_name == "_output_dense":
# output dense
trace.extend(["""output""", """dense"""] )
_A : Any = getattr(snake_case_,"""output""" )
_A : Union[str, Any] = getattr(snake_case_,"""dense""" )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(["""output""", """LayerNorm"""] )
_A : str = getattr(snake_case_,"""output""" )
_A : Union[str, Any] = getattr(snake_case_,"""LayerNorm""" )
elif m_name == "_key_dense":
# attention key
trace.append("""key""" )
_A : Optional[int] = getattr(snake_case_,"""key""" )
elif m_name == "_query_dense":
# attention query
trace.append("""query""" )
_A : Optional[int] = getattr(snake_case_,"""query""" )
elif m_name == "_value_dense":
# attention value
trace.append("""value""" )
_A : Optional[Any] = getattr(snake_case_,"""value""" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["""intermediate""", """dense"""] )
_A : Dict = getattr(snake_case_,"""intermediate""" )
_A : Union[str, Any] = getattr(snake_case_,"""dense""" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("""output""" )
_A : int = getattr(snake_case_,"""output""" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("""bias""" )
_A : List[str] = getattr(snake_case_,"""bias""" )
elif m_name in ["kernel", "gamma"]:
trace.append("""weight""" )
_A : Optional[Any] = getattr(snake_case_,"""weight""" )
else:
logger.warning(f'''Ignored {m_name}''' )
# for certain layers reshape is necessary
_A : List[str] = """.""".join(snake_case_ )
if re.match(r"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""",snake_case_ ) or re.match(
r"""(\S+)\.attention\.output\.dense\.weight""",snake_case_ ):
_A : str = array.reshape(pointer.data.shape )
if "kernel" in full_name:
_A : Tuple = array.transpose()
if pointer.shape == array.shape:
_A : Union[str, Any] = torch.from_numpy(snake_case_ )
else:
raise ValueError(
f'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'''
f''' {array.shape}''' )
logger.info(f'''Successfully set variable {full_name} to PyTorch layer {trace}''' )
return model
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
# Instantiate model
logger.info(f'''Loading model based on config from {config_path}...''' )
_A : Tuple = BertConfig.from_json_file(snake_case_ )
_A : Union[str, Any] = BertModel(snake_case_ )
# Load weights from checkpoint
logger.info(f'''Loading weights from checkpoint {tf_checkpoint_path}...''' )
load_tfa_weights_in_bert(snake_case_,snake_case_,snake_case_ )
# Save pytorch-model
logger.info(f'''Saving PyTorch model to {pytorch_dump_path}...''' )
torch.save(model.state_dict(),snake_case_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model (must include filename).",
)
_snake_case = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 700
|
import operator
def lowerCAmelCase_ ( snake_case_,snake_case_ = False,snake_case_ = None ):
_A : str = operator.lt if reverse else operator.gt
_A : Optional[Any] = solution or []
if not arr:
return solution
_A : Dict = [arr.pop(0 )]
for i, item in enumerate(snake_case_ ):
if _operator(snake_case_,sublist[-1] ):
sublist.append(snake_case_ )
arr.pop(snake_case_ )
# merging sublist into solution list
if not solution:
solution.extend(snake_case_ )
else:
while sublist:
_A : Union[str, Any] = sublist.pop(0 )
for i, xx in enumerate(snake_case_ ):
if not _operator(snake_case_,snake_case_ ):
solution.insert(snake_case_,snake_case_ )
break
else:
solution.append(snake_case_ )
strand_sort(snake_case_,snake_case_,snake_case_ )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 54
| 0
|
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
__A = version.parse(version.parse(torch.__version__).base_version) < version.parse("""1.11""")
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , ) ->Any:
"""simple docstring"""
output_path.parent.mkdir(parents=_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , f=output_path.as_posix() , input_names=_SCREAMING_SNAKE_CASE , output_names=_SCREAMING_SNAKE_CASE , dynamic_axes=_SCREAMING_SNAKE_CASE , do_constant_folding=_SCREAMING_SNAKE_CASE , use_external_data_format=_SCREAMING_SNAKE_CASE , enable_onnx_checker=_SCREAMING_SNAKE_CASE , opset_version=_SCREAMING_SNAKE_CASE , )
else:
export(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , f=output_path.as_posix() , input_names=_SCREAMING_SNAKE_CASE , output_names=_SCREAMING_SNAKE_CASE , dynamic_axes=_SCREAMING_SNAKE_CASE , do_constant_folding=_SCREAMING_SNAKE_CASE , opset_version=_SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ) ->List[str]:
"""simple docstring"""
lowerCAmelCase__ :List[Any] = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
lowerCAmelCase__ :Optional[int] = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
lowerCAmelCase__ :Dict = 'cpu'
lowerCAmelCase__ :Any = Path(_SCREAMING_SNAKE_CASE )
# VAE DECODER
lowerCAmelCase__ :Optional[Any] = AutoencoderKL.from_pretrained(model_path + '/vae' )
lowerCAmelCase__ :Optional[Any] = vae_decoder.config.latent_channels
# forward only through the decoder part
lowerCAmelCase__ :Optional[Any] = vae_decoder.decode
onnx_export(
_SCREAMING_SNAKE_CASE , model_args=(
torch.randn(1 , _SCREAMING_SNAKE_CASE , 25 , 25 ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=_SCREAMING_SNAKE_CASE , )
del vae_decoder
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
"""--model_path""",
type=str,
required=True,
help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""",
)
parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--opset""",
default=14,
type=int,
help="""The version of the ONNX operator set to use.""",
)
parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""")
__A = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print("""SD: Done: ONNX""")
| 93
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case_ : Tuple = {
"configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Dict = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Union[str, Any] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Union[str, Any] = [
"ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"AlbertForMaskedLM",
"AlbertForMultipleChoice",
"AlbertForPreTraining",
"AlbertForQuestionAnswering",
"AlbertForSequenceClassification",
"AlbertForTokenClassification",
"AlbertModel",
"AlbertPreTrainedModel",
"load_tf_weights_in_albert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[Any] = [
"TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAlbertForMaskedLM",
"TFAlbertForMultipleChoice",
"TFAlbertForPreTraining",
"TFAlbertForQuestionAnswering",
"TFAlbertForSequenceClassification",
"TFAlbertForTokenClassification",
"TFAlbertMainLayer",
"TFAlbertModel",
"TFAlbertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Optional[Any] = [
"FlaxAlbertForMaskedLM",
"FlaxAlbertForMultipleChoice",
"FlaxAlbertForPreTraining",
"FlaxAlbertForQuestionAnswering",
"FlaxAlbertForSequenceClassification",
"FlaxAlbertForTokenClassification",
"FlaxAlbertModel",
"FlaxAlbertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
snake_case_ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 488
| 0
|
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
snake_case__ : int = logging.get_logger(__name__)
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = """AutoTokenizer"""
A_ = ["""tokenizer"""]
A_ = {
"""semantic_prompt""": 1,
"""coarse_prompt""": 2,
"""fine_prompt""": 2,
}
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=None ) -> int:
super().__init__(_UpperCAmelCase )
UpperCamelCase_ = speaker_embeddings
@classmethod
def _UpperCAmelCase ( cls , _UpperCAmelCase , _UpperCAmelCase="speaker_embeddings_path.json" , **_UpperCAmelCase ) -> Union[str, Any]:
if speaker_embeddings_dict_path is not None:
UpperCamelCase_ = get_file_from_repo(
_UpperCAmelCase , _UpperCAmelCase , subfolder=kwargs.pop('subfolder' , _UpperCAmelCase ) , cache_dir=kwargs.pop('cache_dir' , _UpperCAmelCase ) , force_download=kwargs.pop('force_download' , _UpperCAmelCase ) , proxies=kwargs.pop('proxies' , _UpperCAmelCase ) , resume_download=kwargs.pop('resume_download' , _UpperCAmelCase ) , local_files_only=kwargs.pop('local_files_only' , _UpperCAmelCase ) , use_auth_token=kwargs.pop('use_auth_token' , _UpperCAmelCase ) , revision=kwargs.pop('revision' , _UpperCAmelCase ) , )
if speaker_embeddings_path is None:
logger.warning(
f"""`{os.path.join(_UpperCAmelCase , _UpperCAmelCase )}` does not exists
, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" )
UpperCamelCase_ = None
else:
with open(_UpperCAmelCase ) as speaker_embeddings_json:
UpperCamelCase_ = json.load(_UpperCAmelCase )
else:
UpperCamelCase_ = None
UpperCamelCase_ = AutoTokenizer.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
return cls(tokenizer=_UpperCAmelCase , speaker_embeddings=_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase="speaker_embeddings_path.json" , _UpperCAmelCase="speaker_embeddings" , _UpperCAmelCase = False , **_UpperCAmelCase , ) -> List[str]:
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(_UpperCAmelCase , _UpperCAmelCase , 'v2' ) , exist_ok=_UpperCAmelCase )
UpperCamelCase_ = {}
UpperCamelCase_ = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
UpperCamelCase_ = self._load_voice_preset(_UpperCAmelCase )
UpperCamelCase_ = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] , _UpperCAmelCase , f"""{prompt_key}_{key}""" ) , voice_preset[key] , allow_pickle=_UpperCAmelCase , )
UpperCamelCase_ = os.path.join(_UpperCAmelCase , f"""{prompt_key}_{key}.npy""" )
UpperCamelCase_ = tmp_dict
with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , 'w' ) as fp:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
super().save_pretrained(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase = None , **_UpperCAmelCase ) -> int:
UpperCamelCase_ = self.speaker_embeddings[voice_preset]
UpperCamelCase_ = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" )
UpperCamelCase_ = get_file_from_repo(
self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , _UpperCAmelCase ) , cache_dir=kwargs.pop('cache_dir' , _UpperCAmelCase ) , force_download=kwargs.pop('force_download' , _UpperCAmelCase ) , proxies=kwargs.pop('proxies' , _UpperCAmelCase ) , resume_download=kwargs.pop('resume_download' , _UpperCAmelCase ) , local_files_only=kwargs.pop('local_files_only' , _UpperCAmelCase ) , use_auth_token=kwargs.pop('use_auth_token' , _UpperCAmelCase ) , revision=kwargs.pop('revision' , _UpperCAmelCase ) , )
if path is None:
raise ValueError(
f"""`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists
, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.""" )
UpperCamelCase_ = np.load(_UpperCAmelCase )
return voice_preset_dict
def _UpperCAmelCase ( self , _UpperCAmelCase = None ) -> Optional[Any]:
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f"""Voice preset unrecognized, missing {key} as a key.""" )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
def __call__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="pt" , _UpperCAmelCase=256 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , **_UpperCAmelCase , ) -> Tuple:
if voice_preset is not None and not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
if (
isinstance(_UpperCAmelCase , _UpperCAmelCase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
UpperCamelCase_ = self._load_voice_preset(_UpperCAmelCase )
else:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not voice_preset.endswith('.npz' ):
UpperCamelCase_ = voice_preset + '.npz'
UpperCamelCase_ = np.load(_UpperCAmelCase )
if voice_preset is not None:
self._validate_voice_preset_dict(_UpperCAmelCase , **_UpperCAmelCase )
UpperCamelCase_ = BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
UpperCamelCase_ = self.tokenizer(
_UpperCAmelCase , return_tensors=_UpperCAmelCase , padding='max_length' , max_length=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
if voice_preset is not None:
UpperCamelCase_ = voice_preset
return encoded_text
| 700
|
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class _a :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=2 , _UpperCAmelCase=32 , _UpperCAmelCase=16 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=4 , _UpperCAmelCase=[0, 1, 2, 3] , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=3 , _UpperCAmelCase=[1, 384, 24, 24] , _UpperCAmelCase=True , _UpperCAmelCase=None , ) -> Optional[Any]:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = image_size
UpperCamelCase_ = patch_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = is_training
UpperCamelCase_ = use_labels
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = backbone_out_indices
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = initializer_range
UpperCamelCase_ = num_labels
UpperCamelCase_ = backbone_featmap_shape
UpperCamelCase_ = scope
UpperCamelCase_ = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase_ = (image_size // patch_size) ** 2
UpperCamelCase_ = num_patches + 1
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase_ = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [96, 192, 384, 768],
'num_groups': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=_UpperCAmelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
UpperCamelCase_ = DPTModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = DPTForDepthEstimation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = DPTForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
A_ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
A_ = (
{
"""depth-estimation""": DPTForDepthEstimation,
"""feature-extraction""": DPTModel,
"""image-segmentation""": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A_ = False
A_ = False
A_ = False
def _UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase_ = DPTModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='DPT does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase )
UpperCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ = [*signature.parameters.keys()]
UpperCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> str:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = True
if model_class in get_values(_UpperCAmelCase ):
continue
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
UpperCamelCase_ = model(**_UpperCAmelCase ).loss
loss.backward()
def _UpperCAmelCase ( self ) -> List[str]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = False
UpperCamelCase_ = True
if model_class in get_values(_UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.train()
UpperCamelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
UpperCamelCase_ = model(**_UpperCAmelCase ).loss
loss.backward()
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = _config_zero_init(_UpperCAmelCase )
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(config=_UpperCAmelCase )
# Skip the check for the backbone
UpperCamelCase_ = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
UpperCamelCase_ = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> Any:
pass
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
UpperCamelCase_ = DPTModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[int]:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = 'add'
with self.assertRaises(_UpperCAmelCase ):
UpperCamelCase_ = DPTForDepthEstimation(_UpperCAmelCase )
def _snake_case ():
UpperCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_torch
@require_vision
@slow
class _a ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas' )
UpperCamelCase_ = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas' ).to(_UpperCAmelCase )
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
UpperCamelCase_ = outputs.predicted_depth
# verify the predicted depth
UpperCamelCase_ = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , _UpperCAmelCase )
UpperCamelCase_ = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , _UpperCAmelCase , atol=1e-4 ) )
| 618
| 0
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self ) -> str:
if self.framework == "pytorch":
subprocess.run(
f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="utf-8" , check=a_ , )
assert hasattr(self , "env" )
def _a ( self , a_=1 ) -> Union[str, Any]:
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"{self.env.base_job_name}-single" , instance_count=a_ , instance_type=self.instance_type , debugger_hook_config=a_ , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )
def _a ( self , a_ ) -> Optional[Any]:
TrainingJobAnalytics(a_ ).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv" )
def _a ( self ) -> List[str]:
# create estimator
_UpperCAmelCase = self.create_estimator()
# run training
estimator.fit()
# result dataframe
_UpperCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
_UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_UpperCAmelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"{estimator.latest_training_job.name}.json" , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , a_ )
| 657
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657
| 1
|
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : int ,lowerCAmelCase_ : int ,lowerCAmelCase_ : bool ,lowerCAmelCase_ : list[int] ,lowerCAmelCase_ : float ) -> int:
"""simple docstring"""
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if len(lowerCAmelCase_ ) == 0:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 ,node_index * 2 ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ) ,minimax(depth + 1 ,node_index * 2 + 1 ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ) ,)
return min(
minimax(depth + 1 ,node_index * 2 ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ) ,minimax(depth + 1 ,node_index * 2 + 1 ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ) ,)
def SCREAMING_SNAKE_CASE__ ( ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict =[90, 23, 6, 33, 21, 65, 123, 3_4423]
SCREAMING_SNAKE_CASE_ : List[Any] =math.log(len(lowerCAmelCase_ ) ,2 )
print('Optimal value : ' ,end='' )
print(minimax(0 ,0 ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 153
|
__SCREAMING_SNAKE_CASE = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
__SCREAMING_SNAKE_CASE = [{'type': 'code', 'content': INSTALL_CONTENT}]
__SCREAMING_SNAKE_CASE = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 153
| 1
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
lowerCamelCase : int = pd.read_csv('''sample_data.csv''', header=None)
lowerCamelCase : Any = df.shape[:1][0]
# If you're using some other dataset input the target column
lowerCamelCase : int = df.iloc[:, 1:2]
lowerCamelCase : Tuple = actual_data.values.reshape(len_data, 1)
lowerCamelCase : List[Any] = MinMaxScaler().fit_transform(actual_data)
lowerCamelCase : Tuple = 10
lowerCamelCase : Tuple = 5
lowerCamelCase : Union[str, Any] = 20
lowerCamelCase : Any = len_data - periods * look_back
lowerCamelCase : int = actual_data[:division]
lowerCamelCase : Optional[int] = actual_data[division - look_back :]
lowerCamelCase , lowerCamelCase : str = [], []
lowerCamelCase , lowerCamelCase : Union[str, Any] = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
lowerCamelCase : Union[str, Any] = np.array(train_x)
lowerCamelCase : List[Any] = np.array(test_x)
lowerCamelCase : List[str] = np.array([list(i.ravel()) for i in train_y])
lowerCamelCase : Dict = np.array([list(i.ravel()) for i in test_y])
lowerCamelCase : List[str] = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(128, 1)))
model.add(Dense(forward_days))
model.compile(loss='''mean_squared_error''', optimizer='''adam''')
lowerCamelCase : List[str] = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
lowerCamelCase : List[str] = model.predict(x_test)
| 367
|
from math import pow, sqrt
def __lowerCAmelCase ( *__snake_case ):
__lowerCAmelCase = len(__snake_case ) > 0 and all(value > 0.0 for value in values )
return result
def __lowerCAmelCase ( __snake_case , __snake_case ):
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__snake_case , __snake_case )
else ValueError("Input Error: Molar mass values must greater than 0." )
)
def __lowerCAmelCase ( __snake_case , __snake_case , __snake_case ):
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__snake_case , __snake_case , __snake_case )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def __lowerCAmelCase ( __snake_case , __snake_case , __snake_case ):
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__snake_case , __snake_case , __snake_case )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def __lowerCAmelCase ( __snake_case , __snake_case , __snake_case ):
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(__snake_case , __snake_case , __snake_case )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def __lowerCAmelCase ( __snake_case , __snake_case , __snake_case ):
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(__snake_case , __snake_case , __snake_case )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
| 367
| 1
|
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class a ( unittest.TestCase ,__magic_name__ ):
def __snake_case ( self : Any ):
snake_case : Optional[int] = load_tool('''text-to-speech''' )
self.tool.setup()
def __snake_case ( self : Union[str, Any] ):
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
snake_case : Optional[int] = self.tool('''hey''' )
snake_case : int = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3], torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485] ), ) )
def __snake_case ( self : Any ):
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
snake_case : Optional[Any] = self.tool('''hey''' )
snake_case : Tuple = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3], torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485] ), ) )
| 555
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
UpperCAmelCase = {
"gpt2": 1_024,
"gpt2-medium": 1_024,
"gpt2-large": 1_024,
"gpt2-xl": 1_024,
"distilgpt2": 1_024,
}
class a ( __magic_name__ ):
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ['''input_ids''', '''attention_mask''']
_snake_case = GPTaTokenizer
def __init__( self : Union[str, Any], SCREAMING_SNAKE_CASE_ : Union[str, Any]=None, SCREAMING_SNAKE_CASE_ : Any=None, SCREAMING_SNAKE_CASE_ : List[str]=None, SCREAMING_SNAKE_CASE_ : Union[str, Any]="<|endoftext|>", SCREAMING_SNAKE_CASE_ : Optional[int]="<|endoftext|>", SCREAMING_SNAKE_CASE_ : Dict="<|endoftext|>", SCREAMING_SNAKE_CASE_ : Union[str, Any]=False, **SCREAMING_SNAKE_CASE_ : Tuple, ):
super().__init__(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, tokenizer_file=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, add_prefix_space=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
snake_case : Optional[Any] = kwargs.pop('''add_bos_token''', SCREAMING_SNAKE_CASE_ )
snake_case : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''', SCREAMING_SNAKE_CASE_ ) != add_prefix_space:
snake_case : List[Any] = getattr(SCREAMING_SNAKE_CASE_, pre_tok_state.pop('''type''' ) )
snake_case : Dict = add_prefix_space
snake_case : List[Any] = pre_tok_class(**SCREAMING_SNAKE_CASE_ )
snake_case : Optional[Any] = add_prefix_space
def __snake_case ( self : Optional[Any], *SCREAMING_SNAKE_CASE_ : Dict, **SCREAMING_SNAKE_CASE_ : List[str] ):
snake_case : str = kwargs.get('''is_split_into_words''', SCREAMING_SNAKE_CASE_ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Union[str, Any], *SCREAMING_SNAKE_CASE_ : Tuple, **SCREAMING_SNAKE_CASE_ : int ):
snake_case : int = kwargs.get('''is_split_into_words''', SCREAMING_SNAKE_CASE_ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : List[Any], SCREAMING_SNAKE_CASE_ : str, SCREAMING_SNAKE_CASE_ : Optional[str] = None ):
snake_case : List[Any] = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_, name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Optional[Any], SCREAMING_SNAKE_CASE_ : "Conversation" ):
snake_case : int = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(SCREAMING_SNAKE_CASE_, add_special_tokens=SCREAMING_SNAKE_CASE_ ) + [self.eos_token_id] )
if len(SCREAMING_SNAKE_CASE_ ) > self.model_max_length:
snake_case : str = input_ids[-self.model_max_length :]
return input_ids
| 555
| 1
|
"""simple docstring"""
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
UpperCAmelCase : Optional[int] = get_logger(__name__)
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Optional[Any] , lowerCAmelCase_ : Optional[str] = None):
"""simple docstring"""
lowercase_ = (
os.path.join(_lowerCAmelCase , config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
lowercase_ = Extractor
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : str):
"""simple docstring"""
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
lowercase_ = os.path.abspath(_lowerCAmelCase)
return os.path.join(self.extract_dir , hash_url_to_filename(_lowerCAmelCase))
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : bool):
"""simple docstring"""
return force_extract or (
not os.path.isfile(_lowerCAmelCase) and not (os.path.isdir(_lowerCAmelCase) and os.listdir(_lowerCAmelCase))
)
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : bool = False):
"""simple docstring"""
lowercase_ = self.extractor.infer_extractor_format(_lowerCAmelCase)
if not extractor_format:
return input_path
lowercase_ = self._get_output_path(_lowerCAmelCase)
if self._do_extract(_lowerCAmelCase , _lowerCAmelCase):
self.extractor.extract(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
return output_path
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
@classmethod
@abstractmethod
def _UpperCAmelCase ( cls : List[Any] , lowerCAmelCase_ : Union[Path, str] , **lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
...
@staticmethod
@abstractmethod
def _UpperCAmelCase ( lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : Union[Path, str]):
"""simple docstring"""
...
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ):
lowercase__ = []
@staticmethod
def _UpperCAmelCase ( lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : int):
"""simple docstring"""
with open(_lowerCAmelCase , """rb""") as f:
return f.read(_lowerCAmelCase)
@classmethod
def _UpperCAmelCase ( cls : Union[str, Any] , lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : bytes = b""):
"""simple docstring"""
if not magic_number:
lowercase_ = max(len(_lowerCAmelCase) for cls_magic_number in cls.magic_numbers)
try:
lowercase_ = cls.read_magic_number(_lowerCAmelCase , _lowerCAmelCase)
except OSError:
return False
return any(magic_number.startswith(_lowerCAmelCase) for cls_magic_number in cls.magic_numbers)
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
@classmethod
def _UpperCAmelCase ( cls : List[str] , lowerCAmelCase_ : Union[Path, str] , **lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
return tarfile.is_tarfile(_lowerCAmelCase)
@staticmethod
def _UpperCAmelCase ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
def resolved(lowerCAmelCase_ : str) -> str:
return os.path.realpath(os.path.abspath(_lowerCAmelCase))
def badpath(lowerCAmelCase_ : str , lowerCAmelCase_ : str) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(_lowerCAmelCase , _lowerCAmelCase)).startswith(_lowerCAmelCase)
def badlink(lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str) -> bool:
# Links are interpreted relative to the directory containing the link
lowercase_ = resolved(os.path.join(_lowerCAmelCase , os.path.dirname(info.name)))
return badpath(info.linkname , base=_lowerCAmelCase)
lowercase_ = resolved(_lowerCAmelCase)
for finfo in members:
if badpath(finfo.name , _lowerCAmelCase):
logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''')
elif finfo.issym() and badlink(_lowerCAmelCase , _lowerCAmelCase):
logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''')
elif finfo.islnk() and badlink(_lowerCAmelCase , _lowerCAmelCase):
logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''')
else:
yield finfo
@staticmethod
def _UpperCAmelCase ( lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : Union[Path, str]):
"""simple docstring"""
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase)
lowercase_ = tarfile.open(_lowerCAmelCase)
tar_file.extractall(_lowerCAmelCase , members=TarExtractor.safemembers(_lowerCAmelCase , _lowerCAmelCase))
tar_file.close()
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = [b"\x1F\x8B"]
@staticmethod
def _UpperCAmelCase ( lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : Union[Path, str]):
"""simple docstring"""
with gzip.open(_lowerCAmelCase , """rb""") as gzip_file:
with open(_lowerCAmelCase , """wb""") as extracted_file:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase)
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = [
b"PK\x03\x04",
b"PK\x05\x06", # empty archive
b"PK\x07\x08", # spanned archive
]
@classmethod
def _UpperCAmelCase ( cls : Optional[int] , lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : bytes = b""):
"""simple docstring"""
if super().is_extractable(_lowerCAmelCase , magic_number=_lowerCAmelCase):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(_lowerCAmelCase , """rb""") as fp:
lowercase_ = _EndRecData(_lowerCAmelCase)
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET]) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
lowercase_ = fp.read(_lowerCAmelCase) # CD is where we expect it to be
if len(_lowerCAmelCase) == sizeCentralDir:
lowercase_ = struct.unpack(_lowerCAmelCase , _lowerCAmelCase) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def _UpperCAmelCase ( lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : Union[Path, str]):
"""simple docstring"""
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase)
with zipfile.ZipFile(_lowerCAmelCase , """r""") as zip_file:
zip_file.extractall(_lowerCAmelCase)
zip_file.close()
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = [b"\xFD\x37\x7A\x58\x5A\x00"]
@staticmethod
def _UpperCAmelCase ( lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : Union[Path, str]):
"""simple docstring"""
with lzma.open(_lowerCAmelCase) as compressed_file:
with open(_lowerCAmelCase , """wb""") as extracted_file:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase)
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"] # RAR_ID # RAR5_ID
@staticmethod
def _UpperCAmelCase ( lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : Union[Path, str]):
"""simple docstring"""
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""")
import rarfile
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase)
lowercase_ = rarfile.RarFile(_lowerCAmelCase)
rf.extractall(_lowerCAmelCase)
rf.close()
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = [b"\x28\xb5\x2F\xFD"]
@staticmethod
def _UpperCAmelCase ( lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : Union[Path, str]):
"""simple docstring"""
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""")
import zstandard as zstd
lowercase_ = zstd.ZstdDecompressor()
with open(_lowerCAmelCase , """rb""") as ifh, open(_lowerCAmelCase , """wb""") as ofh:
dctx.copy_stream(_lowerCAmelCase , _lowerCAmelCase)
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = [b"\x42\x5A\x68"]
@staticmethod
def _UpperCAmelCase ( lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : Union[Path, str]):
"""simple docstring"""
with bza.open(_lowerCAmelCase , """rb""") as compressed_file:
with open(_lowerCAmelCase , """wb""") as extracted_file:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase)
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = [b"\x37\x7A\xBC\xAF\x27\x1C"]
@staticmethod
def _UpperCAmelCase ( lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : Union[Path, str]):
"""simple docstring"""
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""")
import pyazr
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase)
with pyazr.SevenZipFile(_lowerCAmelCase , """r""") as archive:
archive.extractall(_lowerCAmelCase)
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = [b"\x04\x22\x4D\x18"]
@staticmethod
def _UpperCAmelCase ( lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : Union[Path, str]):
"""simple docstring"""
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""")
import lza.frame
with lza.frame.open(_lowerCAmelCase , """rb""") as compressed_file:
with open(_lowerCAmelCase , """wb""") as extracted_file:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase)
class SCREAMING_SNAKE_CASE__ :
lowercase__ = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def _UpperCAmelCase ( cls : Dict):
"""simple docstring"""
return max(
len(_lowerCAmelCase)
for extractor in cls.extractors.values()
if issubclass(_lowerCAmelCase , _lowerCAmelCase)
for extractor_magic_number in extractor.magic_numbers)
@staticmethod
def _UpperCAmelCase ( lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : int):
"""simple docstring"""
try:
return MagicNumberBaseExtractor.read_magic_number(_lowerCAmelCase , magic_number_length=_lowerCAmelCase)
except OSError:
return b""
@classmethod
def _UpperCAmelCase ( cls : int , lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : bool = False):
"""simple docstring"""
warnings.warn(
"""Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use \'infer_extractor_format\' instead.""" , category=_lowerCAmelCase , )
lowercase_ = cls.infer_extractor_format(_lowerCAmelCase)
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def _UpperCAmelCase ( cls : Optional[Any] , lowerCAmelCase_ : Union[Path, str]): # <Added version="2.4.0"/>
"""simple docstring"""
lowercase_ = cls._get_magic_number_max_length()
lowercase_ = cls._read_magic_number(_lowerCAmelCase , _lowerCAmelCase)
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(_lowerCAmelCase , magic_number=_lowerCAmelCase):
return extractor_format
@classmethod
def _UpperCAmelCase ( cls : List[str] , lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : Optional[BaseExtractor] = "deprecated" , ):
"""simple docstring"""
os.makedirs(os.path.dirname(_lowerCAmelCase) , exist_ok=_lowerCAmelCase)
# Prevent parallel extractions
lowercase_ = str(Path(_lowerCAmelCase).with_suffix(""".lock"""))
with FileLock(_lowerCAmelCase):
shutil.rmtree(_lowerCAmelCase , ignore_errors=_lowerCAmelCase)
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(_lowerCAmelCase , _lowerCAmelCase): # passed as positional arg
warnings.warn(
"""Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use \'extractor_format\' instead.""" , category=_lowerCAmelCase , )
lowercase_ = extractor if extractor != """deprecated""" else extractor_format
else:
lowercase_ = cls.extractors[extractor_format]
return extractor.extract(_lowerCAmelCase , _lowerCAmelCase)
else:
warnings.warn(
"""Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=_lowerCAmelCase , )
for extractor in cls.extractors.values():
if extractor.is_extractable(_lowerCAmelCase):
return extractor.extract(_lowerCAmelCase , _lowerCAmelCase)
| 567
|
'''simple docstring'''
from torch import nn
def _A ( _lowerCAmelCase ):
"""simple docstring"""
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f"""Unsupported activation function: {act_fn}""" )
| 474
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ : List[str] = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[int] = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class __lowercase ( __a ):
__UpperCAmelCase = '''bert'''
def __init__( self , lowercase_=3_0_5_2_2 , lowercase_=7_6_8 , lowercase_=1_2 , lowercase_=1_2 , lowercase_=3_0_7_2 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=2 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=0 , lowercase_="absolute" , lowercase_=True , lowercase_=None , **lowercase_ , ) -> List[Any]:
super().__init__(pad_token_id=A__ , **A__)
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = position_embedding_type
__snake_case = use_cache
__snake_case = classifier_dropout
class __lowercase ( __a ):
@property
def _a ( self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__snake_case = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
])
| 700
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
UpperCAmelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[str]:
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .')
self.register_modules(
speech_model=lowercase_ , speech_processor=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , feature_extractor=lowercase_ , )
def _a ( self , lowercase_ = "auto") -> Union[str, Any]:
if slice_size == "auto":
__snake_case = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase_)
def _a ( self) -> Any:
self.enable_attention_slicing(lowercase_)
@torch.no_grad()
def __call__( self , lowercase_ , lowercase_=1_6_0_0_0 , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = None , lowercase_ = 1 , **lowercase_ , ) -> List[str]:
__snake_case = self.speech_processor.feature_extractor(
lowercase_ , return_tensors='pt' , sampling_rate=lowercase_).input_features.to(self.device)
__snake_case = self.speech_model.generate(lowercase_ , max_length=4_8_0_0_0_0)
__snake_case = self.speech_processor.tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ , normalize=lowercase_)[
0
]
if isinstance(lowercase_ , lowercase_):
__snake_case = 1
elif isinstance(lowercase_ , lowercase_):
__snake_case = len(lowercase_)
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(lowercase_)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase_ , lowercase_) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(lowercase_)}.")
# get prompt text embeddings
__snake_case = self.tokenizer(
lowercase_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
__snake_case = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F" {self.tokenizer.model_max_length} tokens: {removed_text}")
__snake_case = text_input_ids[:, : self.tokenizer.model_max_length]
__snake_case = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case = text_embeddings.shape
__snake_case = text_embeddings.repeat(1 , lowercase_ , 1)
__snake_case = text_embeddings.view(bs_embed * num_images_per_prompt , lowercase_ , -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case = 42
if negative_prompt is None:
__snake_case = [''] * batch_size
elif type(lowercase_) is not type(lowercase_):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(lowercase_)} !="
F" {type(lowercase_)}.")
elif isinstance(lowercase_ , lowercase_):
__snake_case = [negative_prompt]
elif batch_size != len(lowercase_):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(lowercase_)}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.')
else:
__snake_case = negative_prompt
__snake_case = text_input_ids.shape[-1]
__snake_case = self.tokenizer(
lowercase_ , padding='max_length' , max_length=lowercase_ , truncation=lowercase_ , return_tensors='pt' , )
__snake_case = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case = uncond_embeddings.shape[1]
__snake_case = uncond_embeddings.repeat(1 , lowercase_ , 1)
__snake_case = uncond_embeddings.view(batch_size * num_images_per_prompt , lowercase_ , -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case = torch.randn(lowercase_ , generator=lowercase_ , device='cpu' , dtype=lowercase_).to(
self.device)
else:
__snake_case = torch.randn(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_)
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
__snake_case = latents.to(self.device)
# set timesteps
self.scheduler.set_timesteps(lowercase_)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
__snake_case = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
__snake_case = {}
if accepts_eta:
__snake_case = eta
for i, t in enumerate(self.progress_bar(lowercase_)):
# expand the latents if we are doing classifier free guidance
__snake_case = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__snake_case = self.scheduler.scale_model_input(lowercase_ , lowercase_)
# predict the noise residual
__snake_case = self.unet(lowercase_ , lowercase_ , encoder_hidden_states=lowercase_).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case = noise_pred.chunk(2)
__snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase_ , lowercase_ , lowercase_)
__snake_case = 1 / 0.1_8215 * latents
__snake_case = self.vae.decode(lowercase_).sample
__snake_case = (image / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(lowercase_)
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowercase_ , nsfw_content_detected=lowercase_)
| 676
| 0
|
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
_lowerCAmelCase = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def __UpperCamelCase ( snake_case__=True ):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_SCREAMING_SNAKE_CASE ) )
class SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_A : int = None
_A : Tuple = None
def lowerCamelCase(self , lowerCAmelCase_ , lowerCAmelCase_ ):
with TemporaryDirectory() as tmp_dir:
A_ : str = dataset_module_factory(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ )
A_ : int = import_main_class(dataset_module.module_path , dataset=lowerCAmelCase_ )
A_ : DatasetBuilder = builder_cls(
cache_dir=lowerCAmelCase_ , config_name=lowerCAmelCase_ , hash=dataset_module.hash , )
A_ : List[Any] = """/""".join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=lowerCAmelCase_ ).replace(os.sep , """/""" ),
config.DATASET_INFO_FILENAME,
] )
A_ : Optional[int] = cached_path(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ )
self.assertTrue(os.path.exists(lowerCAmelCase_ ) )
@pytest.mark.integration
def __UpperCamelCase ( snake_case__ ):
A_ : List[str] = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple"""
A_ : int = dataset_module_factory("""wikipedia""" , cache_dir=snake_case__ )
A_ : List[Any] = import_main_class(dataset_module.module_path )
A_ : DatasetBuilder = builder_cls(
cache_dir=snake_case__ , config_name="""20220301.frr""" , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
A_ : Any = None
builder_instance.download_and_prepare()
A_ : List[str] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def __UpperCamelCase ( snake_case__ ):
A_ : int = dataset_module_factory("""wikipedia""" , cache_dir=snake_case__ )
A_ : Tuple = import_main_class(dataset_module.module_path , dataset=snake_case__ )
A_ : DatasetBuilder = builder_cls(
cache_dir=snake_case__ , config_name="""20220301.frr""" , hash=dataset_module.hash , )
A_ : Tuple = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(snake_case__ , snake_case__ )
assert "train" in ds
assert isinstance(ds["""train"""] , snake_case__ )
assert next(iter(ds["""train"""] ) )
| 180
|
"""simple docstring"""
import numpy as np
def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ = 1E-12 , snake_case__ = 100 , ):
assert np.shape(snake_case__ )[0] == np.shape(snake_case__ )[1]
# Ensure proper dimensionality.
assert np.shape(snake_case__ )[0] == np.shape(snake_case__ )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(snake_case__ ) == np.iscomplexobj(snake_case__ )
A_ : Union[str, Any] = np.iscomplexobj(snake_case__ )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(snake_case__ , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
A_ : List[str] = False
A_ : int = 0
A_ : List[Any] = 0
A_ : Tuple = 1E12
while not convergence:
# Multiple matrix by the vector.
A_ : Dict = np.dot(snake_case__ , snake_case__ )
# Normalize the resulting output vector.
A_ : str = w / np.linalg.norm(snake_case__ )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
A_ : str = vector.conj().T if is_complex else vector.T
A_ : Optional[Any] = np.dot(snake_case__ , np.dot(snake_case__ , snake_case__ ) )
# Check convergence.
A_ : Any = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
A_ : Optional[Any] = True
A_ : Union[str, Any] = lambda_
if is_complex:
A_ : str = np.real(lambda_ )
return lambda_, vector
def __UpperCamelCase ( ):
A_ : Optional[int] = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
A_ : Tuple = np.array([41, 4, 20] )
A_ : Tuple = real_input_matrix.astype(np.complexaaa )
A_ : List[Any] = np.triu(1J * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
A_ : Any = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
A_ : Optional[int] = real_input_matrix
A_ : List[str] = real_vector
elif problem_type == "complex":
A_ : List[Any] = complex_input_matrix
A_ : str = complex_vector
# Our implementation.
A_ , A_ : Dict = power_iteration(snake_case__ , snake_case__ )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
A_ , A_ : Optional[Any] = np.linalg.eigh(snake_case__ )
# Last eigenvalue is the maximum one.
A_ : List[Any] = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
A_ : Optional[Any] = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(snake_case__ ) - np.abs(snake_case__ ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 180
| 1
|
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
SCREAMING_SNAKE_CASE_ : int = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
SCREAMING_SNAKE_CASE_ : Optional[int] = (
subprocess.check_output(f"""git diff --diff-filter=d --name-only {fork_point_sha}""".split()).decode('utf-8').split()
)
SCREAMING_SNAKE_CASE_ : Dict = '|'.join(sys.argv[1:])
SCREAMING_SNAKE_CASE_ : Tuple = re.compile(rf"""^({joined_dirs}).*?\.py$""")
SCREAMING_SNAKE_CASE_ : Any = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 500
|
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def _snake_case ( ):
A__ = argparse.ArgumentParser()
parser.add_argument("""--model_ckpt""" , type=UpperCAmelCase_ , default="""microsoft/unixcoder-base-nine""" )
parser.add_argument("""--num_epochs""" , type=UpperCAmelCase_ , default=5 )
parser.add_argument("""--batch_size""" , type=UpperCAmelCase_ , default=6 )
parser.add_argument("""--gradient_accumulation_steps""" , type=UpperCAmelCase_ , default=1 )
parser.add_argument("""--freeze""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ )
parser.add_argument("""--learning_rate""" , type=UpperCAmelCase_ , default=5e-4 )
parser.add_argument("""--seed""" , type=UpperCAmelCase_ , default=0 )
parser.add_argument("""--lr_scheduler_type""" , type=UpperCAmelCase_ , default="""cosine""" )
parser.add_argument("""--num_warmup_steps""" , type=UpperCAmelCase_ , default=10 )
parser.add_argument("""--weight_decay""" , type=UpperCAmelCase_ , default=0.01 )
parser.add_argument("""--output_dir""" , type=UpperCAmelCase_ , default="""./results""" )
return parser.parse_args()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = load('accuracy')
def _snake_case ( UpperCAmelCase_ : Tuple ):
A__ , A__ = eval_pred
A__ = np.argmax(UpperCAmelCase_ , axis=1 )
return metric.compute(predictions=UpperCAmelCase_ , references=UpperCAmelCase_ )
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Optional[int] , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
super().__init__()
A__ = trainer
def UpperCamelCase ( self: int , UpperCamelCase: Tuple , UpperCamelCase: Optional[int] , UpperCamelCase: Dict , **UpperCamelCase: int ):
"""simple docstring"""
if control.should_evaluate:
A__ = deepcopy(UpperCamelCase )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="""train""" )
return control_copy
def _snake_case ( ):
A__ = get_args()
set_seed(args.seed )
A__ = load_dataset("""codeparrot/codecomplex""" , split="""train""" )
A__ = dataset.train_test_split(test_size=0.2 )
A__ = train_test["""test"""].train_test_split(test_size=0.5 )
A__ = DatasetDict(
{
"""train""": train_test["""train"""],
"""test""": test_validation["""train"""],
"""valid""": test_validation["""test"""],
} )
print("""Loading tokenizer and model""" )
A__ = AutoTokenizer.from_pretrained(args.model_ckpt )
A__ = tokenizer.eos_token
A__ = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
A__ = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
A__ = False
A__ = ClassLabel(num_classes=7 , names=list(set(train_test_validation["""train"""]["""complexity"""] ) ) )
def tokenize(UpperCAmelCase_ : int ):
A__ = tokenizer(example["""src"""] , truncation=UpperCAmelCase_ , max_length=1024 )
A__ = labels.straint(example["""complexity"""] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
A__ = train_test_validation.map(
UpperCAmelCase_ , batched=UpperCAmelCase_ , remove_columns=train_test_validation["""train"""].column_names , )
A__ = DataCollatorWithPadding(tokenizer=UpperCAmelCase_ )
A__ = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="""epoch""" , save_strategy="""epoch""" , logging_strategy="""epoch""" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model="""accuracy""" , run_name="""complexity-java""" , report_to="""wandb""" , )
A__ = Trainer(
model=UpperCAmelCase_ , args=UpperCAmelCase_ , train_dataset=tokenized_datasets["""train"""] , eval_dataset=tokenized_datasets["""valid"""] , tokenizer=UpperCAmelCase_ , data_collator=UpperCAmelCase_ , compute_metrics=UpperCAmelCase_ , )
print("""Training...""" )
trainer.add_callback(CustomCallback(UpperCAmelCase_ ) )
trainer.train()
if __name__ == "__main__":
main()
| 500
| 1
|
'''simple docstring'''
_lowerCAmelCase = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_lowerCAmelCase = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_lowerCAmelCase = {
0: "Sunday",
1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "Friday",
6: "Saturday",
}
def _lowerCAmelCase ( lowercase : int , lowercase : int , lowercase : int ) ->str:
"""simple docstring"""
assert len(str(lowercase ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 1_2, "month should be between 1 to 12"
assert 1 <= day <= 3_1, "day should be between 1 to 31"
# Doomsday algorithm:
lowercase__ = year // 1_0_0
lowercase__ = (5 * (century % 4) + 2) % 7
lowercase__ = year % 1_0_0
lowercase__ = centurian % 1_2
lowercase__ = (
(centurian // 1_2) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
lowercase__ = (
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 4_0_0) == 0)
else DOOMSDAY_LEAP[month - 1]
)
lowercase__ = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 161
|
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __A ( unittest.TestCase ):
"""simple docstring"""
def snake_case_( self )-> Dict:
lowercase__ = 0
@slow
def snake_case_( self )-> int:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
lowercase__ = AutoTokenizer.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(_lowerCamelCase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
lowercase__ = AutoTokenizer.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(_lowerCamelCase ) , 0 )
def snake_case_( self )-> List[Any]:
lowercase__ = AutoTokenizer.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def snake_case_( self )-> Optional[Any]:
lowercase__ = AutoTokenizer.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 2_0 )
def snake_case_( self )-> str:
lowercase__ = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
# Check that tokenizer_type ≠ model_type
lowercase__ = AutoTokenizer.from_pretrained(_lowerCamelCase , config=_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def snake_case_( self )-> int:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_lowerCamelCase , '''vocab.txt''' ) )
lowercase__ = AutoTokenizer.from_pretrained(_lowerCamelCase , tokenizer_type='''bert''' , use_fast=_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_lowerCamelCase , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_lowerCamelCase , '''merges.txt''' ) )
lowercase__ = AutoTokenizer.from_pretrained(_lowerCamelCase , tokenizer_type='''gpt2''' , use_fast=_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
@require_tokenizers
def snake_case_( self )-> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_lowerCamelCase , '''vocab.txt''' ) )
lowercase__ = AutoTokenizer.from_pretrained(_lowerCamelCase , tokenizer_type='''bert''' )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_lowerCamelCase , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_lowerCamelCase , '''merges.txt''' ) )
lowercase__ = AutoTokenizer.from_pretrained(_lowerCamelCase , tokenizer_type='''gpt2''' )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
def snake_case_( self )-> Union[str, Any]:
with pytest.raises(_lowerCamelCase ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def snake_case_( self )-> Dict:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
lowercase__ = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(_lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _lowerCamelCase )
else:
self.assertEqual(tokenizer.do_lower_case , _lowerCamelCase )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
@require_tokenizers
def snake_case_( self )-> Optional[Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_lowerCamelCase , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
lowercase__ = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def snake_case_( self )-> int:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
lowercase__ = TOKENIZER_MAPPING.values()
lowercase__ = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_lowerCamelCase )
@require_tokenizers
def snake_case_( self )-> Union[str, Any]:
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=_lowerCamelCase ) , _lowerCamelCase )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , _lowerCamelCase )
@require_tokenizers
def snake_case_( self )-> List[Any]:
lowercase__ = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=_lowerCamelCase )
lowercase__ = '''Hello, world. How are you?'''
lowercase__ = tokenizer.tokenize(_lowerCamelCase )
self.assertEqual('''[UNK]''' , tokens[0] )
lowercase__ = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=_lowerCamelCase )
lowercase__ = tokenizer.tokenize(_lowerCamelCase )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def snake_case_( self )-> List[str]:
lowercase__ = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def snake_case_( self )-> Optional[int]:
lowercase__ = AutoTokenizer.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCamelCase )
lowercase__ = AutoTokenizer.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 1_2 )
def snake_case_( self )-> Tuple:
lowercase__ = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
def snake_case_( self )-> Tuple:
# Check we can load the tokenizer config of an online model.
lowercase__ = get_tokenizer_config('''bert-base-cased''' )
lowercase__ = config.pop('''_commit_hash''' , _lowerCamelCase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_lowerCamelCase , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
lowercase__ = get_tokenizer_config(_lowerCamelCase )
self.assertDictEqual(_lowerCamelCase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
lowercase__ = AutoTokenizer.from_pretrained(_lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCamelCase )
lowercase__ = get_tokenizer_config(_lowerCamelCase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def snake_case_( self )-> int:
try:
AutoConfig.register('''custom''' , _lowerCamelCase )
AutoTokenizer.register(_lowerCamelCase , slow_tokenizer_class=_lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowerCamelCase ):
AutoTokenizer.register(_lowerCamelCase , slow_tokenizer_class=_lowerCamelCase )
lowercase__ = CustomTokenizer.from_pretrained(_lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCamelCase )
lowercase__ = AutoTokenizer.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def snake_case_( self )-> Any:
try:
AutoConfig.register('''custom''' , _lowerCamelCase )
# Can register in two steps
AutoTokenizer.register(_lowerCamelCase , slow_tokenizer_class=_lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(_lowerCamelCase , fast_tokenizer_class=_lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_lowerCamelCase , slow_tokenizer_class=_lowerCamelCase , fast_tokenizer_class=_lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowerCamelCase ):
AutoTokenizer.register(_lowerCamelCase , fast_tokenizer_class=_lowerCamelCase )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ = BertTokenizerFast.from_pretrained(_lowerCamelCase )
bert_tokenizer.save_pretrained(_lowerCamelCase )
lowercase__ = CustomTokenizerFast.from_pretrained(_lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCamelCase )
lowercase__ = AutoTokenizer.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
lowercase__ = AutoTokenizer.from_pretrained(_lowerCamelCase , use_fast=_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def snake_case_( self )-> Tuple:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_lowerCamelCase ):
lowercase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_lowerCamelCase ):
lowercase__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_lowerCamelCase )
lowercase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCamelCase )
lowercase__ = AutoTokenizer.from_pretrained(_lowerCamelCase , trust_remote_code=_lowerCamelCase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowercase__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_lowerCamelCase , use_fast=_lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCamelCase )
lowercase__ = AutoTokenizer.from_pretrained(_lowerCamelCase , trust_remote_code=_lowerCamelCase , use_fast=_lowerCamelCase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def snake_case_( self )-> Optional[Any]:
class __A ( a ):
"""simple docstring"""
A_ = False
class __A ( a ):
"""simple docstring"""
A_ = NewTokenizer
A_ = False
try:
AutoConfig.register('''custom''' , _lowerCamelCase )
AutoTokenizer.register(_lowerCamelCase , slow_tokenizer_class=_lowerCamelCase )
AutoTokenizer.register(_lowerCamelCase , fast_tokenizer_class=_lowerCamelCase )
# If remote code is not set, the default is to use local
lowercase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
lowercase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=_lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
lowercase__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
lowercase__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_lowerCamelCase , use_fast=_lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
lowercase__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
lowercase__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_lowerCamelCase , use_fast=_lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def snake_case_( self )-> Dict:
lowercase__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowercase__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_lowerCamelCase , use_fast=_lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def snake_case_( self )-> Union[str, Any]:
with self.assertRaisesRegex(
_lowerCamelCase , '''bert-base is not a local folder and is not a valid model identifier''' ):
lowercase__ = AutoTokenizer.from_pretrained('''bert-base''' )
def snake_case_( self )-> Union[str, Any]:
with self.assertRaisesRegex(
_lowerCamelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
lowercase__ = AutoTokenizer.from_pretrained(_lowerCamelCase , revision='''aaaaaa''' )
def snake_case_( self )-> Optional[Any]:
# Make sure we have cached the tokenizer.
lowercase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
lowercase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 161
| 1
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def lowercase_ ( lowercase__ ) ->Optional[Any]:
return np.maximum(0 , lowercase__ )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 273
|
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
A : Dict = '\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n'
A : int = '\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric("matthews_correlation")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results[\'matthews_correlation\'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric("matthews_correlation")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results[\'matthews_correlation\'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric("matthews_correlation")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results[\'matthews_correlation\'], 2))\n -0.25\n'
A : Dict = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'
] , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , __snake_case : int , __snake_case : List[Any] , __snake_case : Union[str, Any]=None ):
'''simple docstring'''
return {
"matthews_correlation": float(matthews_corrcoef(__snake_case , __snake_case , sample_weight=__snake_case ) ),
}
| 273
| 1
|
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class UpperCAmelCase_ ( __A , __A ):
"""simple docstring"""
@register_to_config
def __init__( self : Any , UpperCAmelCase : int = 768 , ) -> Optional[int]:
'''simple docstring'''
super().__init__()
lowercase : Optional[int] =nn.Parameter(torch.zeros(1 , UpperCAmelCase ) )
lowercase : Tuple =nn.Parameter(torch.ones(1 , UpperCAmelCase ) )
def A__ ( self : Union[str, Any] , UpperCAmelCase : Optional[Union[str, torch.device]] = None , UpperCAmelCase : Optional[torch.dtype] = None , ) -> Any:
'''simple docstring'''
lowercase : Union[str, Any] =nn.Parameter(self.mean.to(UpperCAmelCase ).to(UpperCAmelCase ) )
lowercase : Union[str, Any] =nn.Parameter(self.std.to(UpperCAmelCase ).to(UpperCAmelCase ) )
return self
def A__ ( self : int , UpperCAmelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[Any] =(embeds - self.mean) * 1.0 / self.std
return embeds
def A__ ( self : int , UpperCAmelCase : List[Any] ) -> Tuple:
'''simple docstring'''
lowercase : Dict =(embeds * self.std) + self.mean
return embeds
| 94
|
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_a : Tuple = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
_a : int = direct_transformers_import(PATH_TO_TRANSFORMERS)
_a : Dict = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_a : int = re.compile(R"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
_a : Any = {
"DecisionTransformerConfig",
"EncoderDecoderConfig",
"MusicgenConfig",
"RagConfig",
"SpeechEncoderDecoderConfig",
"TimmBackboneConfig",
"VisionEncoderDecoderConfig",
"VisionTextDualEncoderConfig",
"LlamaConfig",
}
def _lowercase ( lowerCamelCase__ ) -> Tuple:
"""simple docstring"""
__UpperCAmelCase : Any = None
# source code of `config_class`
__UpperCAmelCase : Any = inspect.getsource(lowerCamelCase__ )
__UpperCAmelCase : List[Any] = _re_checkpoint.findall(lowerCamelCase__ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith("/" ):
__UpperCAmelCase : List[str] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
__UpperCAmelCase : List[str] = f"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
__UpperCAmelCase : Union[str, Any] = ckpt_name
break
return checkpoint
def _lowercase ( ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase : Optional[int] = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
__UpperCAmelCase : List[Any] = get_checkpoint_from_config_class(lowerCamelCase__ )
__UpperCAmelCase : Optional[Any] = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
__UpperCAmelCase : Optional[Any] = "\n".join(sorted(lowerCamelCase__ ) )
raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 168
| 0
|
import math
import sys
import cva
import numpy as np
def __snake_case ( _UpperCamelCase , _UpperCamelCase ) -> np.ndarray:
# For applying gaussian function for each element in matrix.
_a = math.sqrt(_UpperCamelCase )
_a = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def __snake_case ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> np.ndarray:
_a = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def __snake_case ( _UpperCamelCase , _UpperCamelCase ) -> np.ndarray:
# Creates a gaussian kernel of given dimension.
_a = np.zeros((kernel_size, kernel_size) )
for i in range(0 , _UpperCamelCase ):
for j in range(0 , _UpperCamelCase ):
_a = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(_UpperCamelCase , _UpperCamelCase )
def __snake_case ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> np.ndarray:
_a = np.zeros(img.shape )
_a = get_gauss_kernel(_UpperCamelCase , _UpperCamelCase )
_a , _a = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
_a = get_slice(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
_a = img_s - img_s[kernel_size // 2, kernel_size // 2]
_a = vec_gaussian(_UpperCamelCase , _UpperCamelCase )
_a = np.multiply(_UpperCamelCase , _UpperCamelCase )
_a = np.multiply(_UpperCamelCase , _UpperCamelCase )
_a = np.sum(_UpperCamelCase ) / np.sum(_UpperCamelCase )
_a = val
return imga
def __snake_case ( _UpperCamelCase ) -> tuple:
_a = args[1] if args[1:] else '''../image_data/lena.jpg'''
_a = float(args[2] ) if args[2:] else 1.0
_a = float(args[3] ) if args[3:] else 1.0
if args[4:]:
_a = int(args[4] )
_a = kernel_size + abs(kernel_size % 2 - 1 )
else:
_a = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase :List[Any] = parse_args(sys.argv)
lowerCamelCase :List[Any] = cva.imread(filename, 0)
cva.imshow('input image', img)
lowerCamelCase :Optional[Any] = img / 255
lowerCamelCase :Any = out.astype('float32')
lowerCamelCase :Tuple = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
lowerCamelCase :Dict = out * 255
lowerCamelCase :Any = np.uinta(out)
cva.imshow('output image', out)
cva.waitKey(0)
cva.destroyAllWindows()
| 346
|
def __snake_case ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> bool:
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(_UpperCamelCase ) )
def __snake_case ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> bool:
# Base Case
if index == len(_UpperCamelCase ):
return True
# Recursive Step
for i in range(_UpperCamelCase ):
if valid_coloring(graph[index] , _UpperCamelCase , _UpperCamelCase ):
# Color current vertex
_a = i
# Validate coloring
if util_color(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , index + 1 ):
return True
# Backtrack
_a = -1
return False
def __snake_case ( _UpperCamelCase , _UpperCamelCase ) -> list[int]:
_a = [-1] * len(_UpperCamelCase )
if util_color(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , 0 ):
return colored_vertices
return []
| 346
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class UpperCAmelCase__ ( snake_case ):
"""simple docstring"""
lowerCAmelCase__ : str = "audio-spectrogram-transformer"
def __init__( self: Any , __lowerCAmelCase: int=768 , __lowerCAmelCase: str=12 , __lowerCAmelCase: Optional[Any]=12 , __lowerCAmelCase: Optional[int]=3_072 , __lowerCAmelCase: List[Any]="gelu" , __lowerCAmelCase: int=0.0 , __lowerCAmelCase: str=0.0 , __lowerCAmelCase: Optional[Any]=0.02 , __lowerCAmelCase: Dict=1E-12 , __lowerCAmelCase: List[Any]=16 , __lowerCAmelCase: Dict=True , __lowerCAmelCase: int=10 , __lowerCAmelCase: Optional[Any]=10 , __lowerCAmelCase: Union[str, Any]=1_024 , __lowerCAmelCase: Optional[Any]=128 , **__lowerCAmelCase: Dict , ) -> List[Any]:
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_act
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = patch_size
__UpperCAmelCase = qkv_bias
__UpperCAmelCase = frequency_stride
__UpperCAmelCase = time_stride
__UpperCAmelCase = max_length
__UpperCAmelCase = num_mel_bins
| 221
|
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def a_ ( __lowercase : Any ) -> List[Any]:
_snake_case = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
def a_ ( __lowercase : Dict ) -> Tuple:
_snake_case , _snake_case = emb.weight.shape
_snake_case = nn.Linear(__lowercase , __lowercase , bias=__lowercase )
_snake_case = emb.weight.data
return lin_layer
def a_ ( __lowercase : Optional[int] , __lowercase : Union[str, Any]=None ) -> Tuple:
_snake_case = {}
for old_key in state_dict.keys():
_snake_case = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
_snake_case = key.replace('moe_layer.experts.0' , f'''ffn.experts.expert_{expert_idx}''' )
else:
_snake_case = key.replace('moe_layer.experts.' , 'ffn.experts.expert_' )
if "gate" in key:
_snake_case = key.replace('.moe_layer.gate.wg' , '.ffn.router.classifier' )
if "fc2" and "experts" not in key:
_snake_case = key.replace('.fc2.' , '.ffn.fc2.' )
if "fc1" and "experts" not in key:
_snake_case = key.replace('.fc1.' , '.ffn.fc1.' )
if ".encoder_attn." in key:
_snake_case = key.replace('.encoder_attn.' , '.cross_attention.' )
if "encoder_attn_layer_norm" in key:
_snake_case = key.replace('encoder_attn_layer_norm' , 'cross_attention_layer_norm' )
if "final_layer_norm" in key:
_snake_case = key.replace('final_layer_norm' , 'ff_layer_norm' )
_snake_case = state_dict[old_key]
return new_dict
def a_ ( __lowercase : Optional[Any] , __lowercase : Tuple , __lowercase : Any , __lowercase : List[str] , __lowercase : str = WEIGHTS_NAME ) -> Union[str, Any]:
_snake_case = []
_snake_case = 0
os.makedirs(__lowercase , exist_ok=__lowercase )
for expert in range(__lowercase ):
_snake_case = switch_checkpoint_path + f'''-rank-{expert}.pt'''
if os.path.isfile(__lowercase ):
_snake_case = torch.load(__lowercase )['model']
remove_ignore_keys_(__lowercase )
_snake_case = rename_fairseq_keys(__lowercase , __lowercase )
_snake_case = os.path.join(
__lowercase , weights_name.replace('.bin' , f'''-{len(__lowercase )+1:05d}-of-???.bin''' ) )
torch.save(__lowercase , __lowercase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(__lowercase )[0]].dtype )
# Add the last block
_snake_case = os.path.join(__lowercase , weights_name.replace('.bin' , f'''-{len(__lowercase )+1:05d}-of-???.bin''' ) )
_snake_case = torch.load(switch_checkpoint_path + '-shared.pt' )['model']
remove_ignore_keys_(__lowercase )
_snake_case = rename_fairseq_keys(__lowercase , __lowercase )
_snake_case = shared_weights['decoder.embed_tokens.weight']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(__lowercase ) == 1:
_snake_case = os.path.join(__lowercase , __lowercase )
torch.save(__lowercase , __lowercase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(__lowercase , __lowercase )
# Otherwise, let's build the index
_snake_case = {}
for idx, shard in enumerate(__lowercase ):
_snake_case = weights_name.replace('.bin' , f'''-{idx+1:05d}-of-{len(__lowercase ):05d}.bin''' )
_snake_case = os.path.join(__lowercase , weights_name.replace('.bin' , f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(__lowercase , os.path.join(__lowercase , __lowercase ) )
for key in shard:
_snake_case = shard_file
# Add the metadata
_snake_case = {'total_size': total_size}
_snake_case = {'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(__lowercase , __lowercase ) , 'w' , encoding='utf-8' ) as f:
_snake_case = json.dumps(__lowercase , indent=2 , sort_keys=__lowercase ) + '\n'
f.write(__lowercase )
return metadata, index
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
_lowerCamelCase : List[str] = parser.parse_args()
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
_lowerCamelCase : Tuple = NllbMoeConfig.from_pretrained(
'''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
_lowerCamelCase : Dict = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
model.save_pretrained(args.pytorch_dump_folder_path)
| 686
| 0
|
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
a_ = logging.getLogger(__name__)
a_ = "pytorch_model.bin"
@dataclasses.dataclass
class snake_case :
__UpperCamelCase = dataclasses.field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'})
__UpperCamelCase = dataclasses.field(
default=_UpperCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , )
@dataclasses.dataclass
class snake_case :
__UpperCamelCase = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'})
__UpperCamelCase = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'})
__UpperCamelCase = dataclasses.field(
default=_UpperCamelCase , metadata={'help': 'A csv or a json file containing the validation data.'})
__UpperCamelCase = dataclasses.field(
default=_UpperCamelCase , metadata={'help': 'The name of the task to train on.'} , )
__UpperCamelCase = dataclasses.field(
default=_UpperCamelCase , metadata={'help': 'The list of labels for the task.'})
@dataclasses.dataclass
class snake_case :
__UpperCamelCase = dataclasses.field(
metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'})
__UpperCamelCase = dataclasses.field(
default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'})
__UpperCamelCase = dataclasses.field(
default='no' , metadata={
'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
} , )
__UpperCamelCase = dataclasses.field(
default=10 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
__UpperCamelCase = dataclasses.field(
default=0.0 , metadata={
'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
} , )
__UpperCamelCase = dataclasses.field(
default=_UpperCamelCase , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , )
__UpperCamelCase = dataclasses.field(
default=_UpperCamelCase , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , )
__UpperCamelCase = dataclasses.field(
default=_UpperCamelCase , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , )
__UpperCamelCase = dataclasses.field(
default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , )
__UpperCamelCase = dataclasses.field(
default=100 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
__UpperCamelCase = dataclasses.field(
default=_UpperCamelCase , metadata={'help': 'Random seed for initialization.'} , )
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Union[str, Any]:
_A = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
_A = dataset.filter(lambda __lowercase : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
_A = int(eval_result * len(__lowercase ) )
print(__lowercase )
_A = dataset.sort("probability" , reverse=__lowercase )
_A = dataset.select(range(__lowercase ) )
_A = dataset.remove_columns(["label", "probability"] )
_A = dataset.rename_column("prediction" , "label" )
_A = dataset.map(lambda __lowercase : {"label": idalabel[example["label"]]} )
_A = dataset.shuffle(seed=args.seed )
_A = os.path.join(__lowercase , f"""train_pseudo.{args.data_file_extension}""" )
if args.data_file_extension == "csv":
dataset.to_csv(__lowercase , index=__lowercase )
else:
dataset.to_json(__lowercase )
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase , **__lowercase ) -> List[Any]:
_A = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
_A = STModelArguments(model_name_or_path=__lowercase )
_A = STDataArguments(train_file=__lowercase , infer_file=__lowercase )
_A = STTrainingArguments(output_dir=__lowercase )
_A = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(__lowercase ).items():
setattr(__lowercase , __lowercase , __lowercase )
for key, value in kwargs.items():
if hasattr(__lowercase , __lowercase ):
setattr(__lowercase , __lowercase , __lowercase )
# Sanity checks
_A = {}
_A = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
_A = args.train_file
_A = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
_A = args.eval_file
for key in data_files:
_A = data_files[key].split("." )[-1]
assert extension in ["csv", "json"], f"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
_A = extension
else:
assert extension == args.data_file_extension, f"""`{key}_file` should be a {args.data_file_extension} file`."""
assert (
args.eval_metric in datasets.list_metrics()
), f"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("Creating the initial data directory for self-training..." )
_A = f"""{args.output_dir}/self-train_iter-{{}}""".format
_A = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=__lowercase )
os.makedirs(__lowercase , exist_ok=__lowercase )
accelerator.wait_for_everyone()
_A = None
_A = None
_A = 0
_A = False
# Show the progress bar
_A = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
_A = data_dir_format(__lowercase )
assert os.path.exists(__lowercase )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
_A = os.path.join(__lowercase , "stage-1" )
_A = {
"accelerator": accelerator,
"model_name_or_path": args.model_name_or_path,
"cache_dir": args.cache_dir,
"do_train": True,
"train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
"do_eval": True if args.eval_file is not None else False,
"eval_file": data_files["eval"],
"do_predict": True,
"infer_file": data_files["infer"],
"task_name": args.task_name,
"label_list": args.label_list,
"output_dir": current_output_dir,
"eval_metric": args.eval_metric,
"evaluation_strategy": args.evaluation_strategy,
"early_stopping_patience": args.early_stopping_patience,
"early_stopping_threshold": args.early_stopping_threshold,
"seed": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(__lowercase , __lowercase ):
arguments_dict.update({key: value} )
_A = os.path.join(__lowercase , "best-checkpoint" , __lowercase )
if os.path.exists(__lowercase ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." , __lowercase , __lowercase , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 1 *****" , __lowercase )
finetune(**__lowercase )
accelerator.wait_for_everyone()
assert os.path.exists(__lowercase )
logger.info("Self-training job completed: iteration: %d, stage: 1." , __lowercase )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
_A = os.path.join(__lowercase , "best-checkpoint" )
_A = os.path.join(__lowercase , "stage-2" )
# Update arguments_dict
_A = model_path
_A = data_files["train"]
_A = current_output_dir
_A = os.path.join(__lowercase , "best-checkpoint" , __lowercase )
if os.path.exists(__lowercase ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." , __lowercase , __lowercase , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 2 *****" , __lowercase )
finetune(**__lowercase )
accelerator.wait_for_everyone()
assert os.path.exists(__lowercase )
logger.info("Self-training job completed: iteration: %d, stage: 2." , __lowercase )
_A = iteration
_A = data_dir_format(iteration + 1 )
_A = AutoConfig.from_pretrained(os.path.join(__lowercase , "best-checkpoint" ) )
_A = config.idalabel
_A = os.path.join(__lowercase , "eval_results_best-checkpoint.json" )
_A = os.path.join(__lowercase , "test_results_best-checkpoint.json" )
assert os.path.exists(__lowercase )
with open(__lowercase , "r" ) as f:
_A = float(json.load(__lowercase )[args.eval_metric] )
_A = os.path.join(__lowercase , "infer_output_best-checkpoint.csv" )
assert os.path.exists(__lowercase )
# Loading the dataset from local csv or json files.
_A = load_dataset(args.data_file_extension , data_files={"data": data_files["infer"]} )["data"]
_A = load_dataset("csv" , data_files={"data": infer_output_file} )["data"]
if accelerator.is_main_process:
os.makedirs(__lowercase , exist_ok=__lowercase )
shutil.copy(__lowercase , os.path.join(__lowercase , f"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(__lowercase ):
shutil.copy(__lowercase , os.path.join(__lowercase , f"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
accelerator.wait_for_everyone()
_A = os.path.join(__lowercase , f"""train_pseudo.{args.data_file_extension}""" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
_A = eval_result
if best_iteration is None:
_A = new_iteration
_A = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
_A = new_iteration
_A = new_eval_result
_A = 0
else:
if new_eval_result == best_eval_result:
_A = new_iteration
_A = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
_A = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("Best iteration: %d" , __lowercase )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , __lowercase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__lowercase , f"""eval_results_iter-{iteration}.json""" ) , os.path.join(__lowercase , "eval_results_best-iteration.json" ) , )
else:
# Assume that the last iteration is the best
logger.info("Best iteration: %d" , args.max_selftrain_iterations - 1 )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , __lowercase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__lowercase , f"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , os.path.join(__lowercase , "eval_results_best-iteration.json" ) , )
| 706
|
"""simple docstring"""
import random
def a__ ( __lowercase , __lowercase , __lowercase ) -> Optional[Any]:
_A = a[left_index]
_A = left_index + 1
for j in range(left_index + 1 , __lowercase ):
if a[j] < pivot:
_A , _A = a[i], a[j]
i += 1
_A , _A = a[i - 1], a[left_index]
return i - 1
def a__ ( __lowercase , __lowercase , __lowercase ) -> int:
if left < right:
_A = random.randint(__lowercase , right - 1 )
_A , _A = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
_A = partition(__lowercase , __lowercase , __lowercase )
quick_sort_random(
__lowercase , __lowercase , __lowercase ) # recursive quicksort to the left of the pivot point
quick_sort_random(
__lowercase , pivot_index + 1 , __lowercase ) # recursive quicksort to the right of the pivot point
def a__ ( ) -> Dict:
_A = input("Enter numbers separated by a comma:\n" ).strip()
_A = [int(__lowercase ) for item in user_input.split("," )]
quick_sort_random(__lowercase , 0 , len(__lowercase ) )
print(__lowercase )
if __name__ == "__main__":
main()
| 621
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase : int = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[str] = ['''RobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Union[str, Any] = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4
|
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def lowerCamelCase_( _lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
return 1.0 / (1.0 + np.exp(-_outputs ))
def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Any = np.max(_outputs , axis=-1 , keepdims=_lowerCamelCase )
_lowerCamelCase : Dict = np.exp(_outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=_lowerCamelCase )
class A_ ( _a ):
lowerCAmelCase__ = 'sigmoid'
lowerCAmelCase__ = 'softmax'
lowerCAmelCase__ = 'none'
@add_end_docstrings(
_a , r'\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `"default"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `"sigmoid"`: Applies the sigmoid function on the output.\n - `"softmax"`: Applies the softmax function on the output.\n - `"none"`: Does not apply any function on the output.\n ' , )
class A_ ( _a ):
lowerCAmelCase__ = False
lowerCAmelCase__ = ClassificationFunction.NONE
def __init__( self: str ,**__lowerCAmelCase: str ):
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def _lowercase ( self: Dict ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Optional[Any]=None ,__lowerCAmelCase: List[Any]="" ,**__lowerCAmelCase: List[str] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = tokenizer_kwargs
_lowerCamelCase : Optional[int] = {}
if hasattr(self.model.config ,"return_all_scores" ) and return_all_scores is None:
_lowerCamelCase : Tuple = self.model.config.return_all_scores
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) or top_k is None:
_lowerCamelCase : List[str] = top_k
_lowerCamelCase : Union[str, Any] = False
elif return_all_scores is not None:
warnings.warn(
"`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
" `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`." ,__lowerCAmelCase ,)
if return_all_scores:
_lowerCamelCase : Optional[int] = None
else:
_lowerCamelCase : Union[str, Any] = 1
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : Optional[int] = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
_lowerCamelCase : Dict = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self: int ,*__lowerCAmelCase: List[Any] ,**__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : Dict = super().__call__(*__lowerCAmelCase ,**__lowerCAmelCase )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
_lowerCamelCase : Optional[Any] = "top_k" not in kwargs
if isinstance(args[0] ,__lowerCAmelCase ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def _lowercase ( self: int ,__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : int = self.framework
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
return self.tokenizer(**__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase )
elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ) and len(__lowerCAmelCase ) == 1 and isinstance(inputs[0] ,__lowerCAmelCase ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] ,text_pair=inputs[0][1] ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase )
elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
"The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
" dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair." )
return self.tokenizer(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase )
def _lowercase ( self: int ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
return self.model(**__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: int=None ,__lowerCAmelCase: str=1 ,__lowerCAmelCase: Dict=True ):
'''simple docstring'''
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
_lowerCamelCase : Dict = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
_lowerCamelCase : List[Any] = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config ,"function_to_apply" ) and function_to_apply is None:
_lowerCamelCase : Optional[int] = self.model.config.function_to_apply
else:
_lowerCamelCase : str = ClassificationFunction.NONE
_lowerCamelCase : List[Any] = model_outputs["logits"][0]
_lowerCamelCase : Optional[int] = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
_lowerCamelCase : str = sigmoid(__lowerCAmelCase )
elif function_to_apply == ClassificationFunction.SOFTMAX:
_lowerCamelCase : Optional[int] = softmax(__lowerCAmelCase )
elif function_to_apply == ClassificationFunction.NONE:
_lowerCamelCase : str = outputs
else:
raise ValueError(F"""Unrecognized `function_to_apply` argument: {function_to_apply}""" )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
_lowerCamelCase : Optional[int] = [
{"label": self.model.config.idalabel[i], "score": score.item()} for i, score in enumerate(__lowerCAmelCase )
]
if not _legacy:
dict_scores.sort(key=lambda __lowerCAmelCase : x["score"] ,reverse=__lowerCAmelCase )
if top_k is not None:
_lowerCamelCase : Any = dict_scores[:top_k]
return dict_scores
| 46
| 0
|
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class UpperCAmelCase ( __snake_case ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'char'
SCREAMING_SNAKE_CASE_ = 'bpe'
SCREAMING_SNAKE_CASE_ = 'wp'
A_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class UpperCAmelCase ( __snake_case ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = ['image_processor', 'char_tokenizer']
SCREAMING_SNAKE_CASE_ = 'ViTImageProcessor'
SCREAMING_SNAKE_CASE_ = 'MgpstrTokenizer'
def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
lowerCamelCase_ = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _lowercase , )
lowerCamelCase_ = kwargs.pop('feature_extractor' )
lowerCamelCase_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
lowerCamelCase_ = tokenizer
lowerCamelCase_ = AutoTokenizer.from_pretrained('gpt2' )
lowerCamelCase_ = AutoTokenizer.from_pretrained('bert-base-uncased' )
super().__init__(_lowercase , _lowercase )
def __call__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
lowerCamelCase_ = self.image_processor(_lowercase , return_tensors=_lowercase , **_lowercase )
if text is not None:
lowerCamelCase_ = self.char_tokenizer(_lowercase , return_tensors=_lowercase , **_lowercase )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCamelCase_ = encodings["""input_ids"""]
return inputs
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = sequences
lowerCamelCase_ = char_preds.size(0 )
lowerCamelCase_ = self._decode_helper(_lowercase , 'char' )
lowerCamelCase_ = self._decode_helper(_lowercase , 'bpe' )
lowerCamelCase_ = self._decode_helper(_lowercase , 'wp' )
lowerCamelCase_ = []
lowerCamelCase_ = []
for i in range(_lowercase ):
lowerCamelCase_ = [char_scores[i], bpe_scores[i], wp_scores[i]]
lowerCamelCase_ = [char_strs[i], bpe_strs[i], wp_strs[i]]
lowerCamelCase_ = scores.index(max(_lowercase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
lowerCamelCase_ = {}
lowerCamelCase_ = final_strs
lowerCamelCase_ = final_scores
lowerCamelCase_ = char_strs
lowerCamelCase_ = bpe_strs
lowerCamelCase_ = wp_strs
return out
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
if format == DecodeType.CHARACTER:
lowerCamelCase_ = self.char_decode
lowerCamelCase_ = 1
lowerCamelCase_ = """[s]"""
elif format == DecodeType.BPE:
lowerCamelCase_ = self.bpe_decode
lowerCamelCase_ = 2
lowerCamelCase_ = """#"""
elif format == DecodeType.WORDPIECE:
lowerCamelCase_ = self.wp_decode
lowerCamelCase_ = 102
lowerCamelCase_ = """[SEP]"""
else:
raise ValueError(f'''Format {format} is not supported.''' )
lowerCamelCase_ = [], []
lowerCamelCase_ = pred_logits.size(0 )
lowerCamelCase_ = pred_logits.size(1 )
lowerCamelCase_ = pred_logits.topk(1 , dim=-1 , largest=_lowercase , sorted=_lowercase )
lowerCamelCase_ = preds_index.view(-1 , _lowercase )[:, 1:]
lowerCamelCase_ = decoder(_lowercase )
lowerCamelCase_ = torch.nn.functional.softmax(_lowercase , dim=2 ).max(dim=2 )
lowerCamelCase_ = preds_max_prob[:, 1:]
for index in range(_lowercase ):
lowerCamelCase_ = preds_str[index].find(_lowercase )
lowerCamelCase_ = preds_str[index][:pred_eos]
lowerCamelCase_ = preds_index[index].cpu().tolist()
lowerCamelCase_ = pred_index.index(_lowercase ) if eos_token in pred_index else -1
lowerCamelCase_ = preds_max_prob[index][: pred_eos_index + 1]
lowerCamelCase_ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(_lowercase )
conf_scores.append(_lowercase )
return dec_strs, conf_scores
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
lowerCamelCase_ = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(_lowercase )]
return decode_strs
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(_lowercase )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(_lowercase )]
return decode_strs
| 719
|
'''simple docstring'''
import os
import sys
import unittest
A_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
A_ = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
A_ = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {'BertModelTest': 'BertModelTester'}
lowerCamelCase_ = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = get_model_to_test_mapping(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = get_model_to_test_mapping(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
lowerCamelCase_ = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
lowerCamelCase_ = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
| 384
| 0
|
def __lowerCAmelCase ( A = 600851475143 ):
try:
UpperCAmelCase_ = int(A )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one." )
UpperCAmelCase_ = 1
UpperCAmelCase_ = 2
while i * i <= n:
while n % i == 0:
UpperCAmelCase_ = i
n //= i
i += 1
if n > 1:
UpperCAmelCase_ = n
return int(A )
if __name__ == "__main__":
print(F'{solution() = }')
| 162
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_a: int = logging.get_logger(__name__)
_a: Optional[Any] = {
"""SenseTime/deformable-detr""": """https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json""",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class __UpperCamelCase ( lowercase ):
SCREAMING_SNAKE_CASE__ = 'deformable_detr'
SCREAMING_SNAKE_CASE__ = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Optional[int] , lowerCAmelCase : Any=True , lowerCAmelCase : str=None , lowerCAmelCase : List[str]=3 , lowerCAmelCase : Tuple=300 , lowerCAmelCase : List[str]=1_024 , lowerCAmelCase : Any=6 , lowerCAmelCase : str=1_024 , lowerCAmelCase : Any=8 , lowerCAmelCase : Dict=6 , lowerCAmelCase : Any=1_024 , lowerCAmelCase : Dict=8 , lowerCAmelCase : str=0.0 , lowerCAmelCase : Dict=True , lowerCAmelCase : Optional[int]="relu" , lowerCAmelCase : List[str]=256 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : str=0.0 , lowerCAmelCase : Union[str, Any]=0.0 , lowerCAmelCase : Union[str, Any]=0.02 , lowerCAmelCase : List[str]=1.0 , lowerCAmelCase : Dict=True , lowerCAmelCase : int=False , lowerCAmelCase : Tuple="sine" , lowerCAmelCase : Optional[Any]="resnet50" , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : Optional[Any]=False , lowerCAmelCase : Dict=4 , lowerCAmelCase : int=4 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : int=False , lowerCAmelCase : Any=300 , lowerCAmelCase : Dict=False , lowerCAmelCase : Any=1 , lowerCAmelCase : Tuple=5 , lowerCAmelCase : str=2 , lowerCAmelCase : int=1 , lowerCAmelCase : List[Any]=1 , lowerCAmelCase : Any=5 , lowerCAmelCase : Optional[Any]=2 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : List[Any]=0.25 , lowerCAmelCase : List[str]=False , **lowerCAmelCase : Union[str, Any] , ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase_ = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = backbone_config.get("model_type" )
UpperCAmelCase_ = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase_ = config_class.from_dict(lowerCAmelCase )
UpperCAmelCase_ = use_timm_backbone
UpperCAmelCase_ = backbone_config
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = num_queries
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = d_model
UpperCAmelCase_ = encoder_ffn_dim
UpperCAmelCase_ = encoder_layers
UpperCAmelCase_ = encoder_attention_heads
UpperCAmelCase_ = decoder_ffn_dim
UpperCAmelCase_ = decoder_layers
UpperCAmelCase_ = decoder_attention_heads
UpperCAmelCase_ = dropout
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = activation_dropout
UpperCAmelCase_ = activation_function
UpperCAmelCase_ = init_std
UpperCAmelCase_ = init_xavier_std
UpperCAmelCase_ = encoder_layerdrop
UpperCAmelCase_ = auxiliary_loss
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = backbone
UpperCAmelCase_ = use_pretrained_backbone
UpperCAmelCase_ = dilation
# deformable attributes
UpperCAmelCase_ = num_feature_levels
UpperCAmelCase_ = encoder_n_points
UpperCAmelCase_ = decoder_n_points
UpperCAmelCase_ = two_stage
UpperCAmelCase_ = two_stage_num_proposals
UpperCAmelCase_ = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
UpperCAmelCase_ = class_cost
UpperCAmelCase_ = bbox_cost
UpperCAmelCase_ = giou_cost
# Loss coefficients
UpperCAmelCase_ = mask_loss_coefficient
UpperCAmelCase_ = dice_loss_coefficient
UpperCAmelCase_ = bbox_loss_coefficient
UpperCAmelCase_ = giou_loss_coefficient
UpperCAmelCase_ = eos_coefficient
UpperCAmelCase_ = focal_alpha
UpperCAmelCase_ = disable_custom_kernels
super().__init__(is_encoder_decoder=lowerCAmelCase , **lowerCAmelCase )
@property
def __A ( self : Optional[int] ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def __A ( self : Any ):
'''simple docstring'''
return self.d_model
def __A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
UpperCAmelCase_ = self.backbone_config.to_dict()
UpperCAmelCase_ = self.__class__.model_type
return output
| 162
| 1
|
"""simple docstring"""
from __future__ import annotations
snake_case = '''Muhammad Umer Farooq'''
snake_case = '''MIT'''
snake_case = '''1.0.0'''
snake_case = '''Muhammad Umer Farooq'''
snake_case = '''contact@muhammadumerfarooq.me'''
snake_case = '''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self : List[Any] , __lowerCamelCase : str ):
"""simple docstring"""
super().__init__()
_snake_case = []
_snake_case = domain
def __UpperCAmelCase ( self : str , __lowerCamelCase : str , __lowerCamelCase : list[tuple[str, str | None]] ):
"""simple docstring"""
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
_snake_case = parse.urljoin(self.domain , __lowerCamelCase )
self.urls.append(__lowerCamelCase )
def snake_case ( lowerCAmelCase_ ) -> str:
return ".".join(get_sub_domain_name(lowerCAmelCase_ ).split('''.''' )[-2:] )
def snake_case ( lowerCAmelCase_ ) -> str:
return parse.urlparse(lowerCAmelCase_ ).netloc
def snake_case ( lowerCAmelCase_ = "https://github.com" ) -> list[str]:
_snake_case = get_domain_name(lowerCAmelCase_ )
# Initialize the parser
_snake_case = Parser(lowerCAmelCase_ )
try:
# Open URL
_snake_case = requests.get(lowerCAmelCase_ )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
_snake_case = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
_snake_case = requests.get(lowerCAmelCase_ )
# Get the valid email.
_snake_case = re.findall('''[a-zA-Z0-9]+@''' + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(lowerCAmelCase_ )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(lowerCAmelCase_ )
if __name__ == "__main__":
snake_case = emails_from_url('''https://github.com''')
print(F"{len(emails)} emails found:")
print('''\n'''.join(sorted(emails)))
| 404
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class UpperCAmelCase ( unittest.TestCase ):
def __init__( self : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any]=1_3 , __lowerCamelCase : Optional[int]=7 , __lowerCamelCase : str=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Any=9_9 , __lowerCamelCase : Any=3_2 , __lowerCamelCase : Any=5 , __lowerCamelCase : Dict=4 , __lowerCamelCase : Any=3_7 , __lowerCamelCase : Any="gelu" , __lowerCamelCase : str=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Optional[Any]=5_1_2 , __lowerCamelCase : Dict=1_6 , __lowerCamelCase : int=2 , __lowerCamelCase : Optional[Any]=0.0_2 , __lowerCamelCase : Dict=4 , ):
"""simple docstring"""
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_attention_mask
_snake_case = use_token_type_ids
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = num_choices
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = None
if self.use_attention_mask:
_snake_case = random_attention_mask([self.batch_size, self.seq_length] )
_snake_case = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__lowerCamelCase , )
return config, input_ids, attention_mask
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,unittest.TestCase ):
A__ : Optional[int] = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_snake_case = FlaxDistilBertModelTester(self )
@slow
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
_snake_case = model_class_name.from_pretrained('''distilbert-base-uncased''' )
_snake_case = model(np.ones((1, 1) ) )
self.assertIsNotNone(__lowerCamelCase )
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
_snake_case = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
_snake_case = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_snake_case = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_snake_case = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
_snake_case = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , __lowerCamelCase )
_snake_case = np.array([[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1E-4 ) )
| 404
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
__UpperCamelCase : str = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class _UpperCamelCase ( A,A ):
'''simple docstring'''
a_ : Dict = "nat"
a_ : List[Any] = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : List[Any] , _lowerCamelCase : str=4 , _lowerCamelCase : List[str]=3 , _lowerCamelCase : Union[str, Any]=6_4 , _lowerCamelCase : str=[3, 4, 6, 5] , _lowerCamelCase : List[Any]=[2, 4, 8, 1_6] , _lowerCamelCase : str=7 , _lowerCamelCase : int=3.0 , _lowerCamelCase : Dict=True , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : int=0.1 , _lowerCamelCase : int="gelu" , _lowerCamelCase : int=0.02 , _lowerCamelCase : str=1E-5 , _lowerCamelCase : Union[str, Any]=0.0 , _lowerCamelCase : Dict=None , _lowerCamelCase : str=None , **_lowerCamelCase : Any , ):
'''simple docstring'''
super().__init__(**_lowerCamelCase )
__lowerCamelCase : Any = patch_size
__lowerCamelCase : Optional[int] = num_channels
__lowerCamelCase : Dict = embed_dim
__lowerCamelCase : Optional[int] = depths
__lowerCamelCase : Union[str, Any] = len(_lowerCamelCase )
__lowerCamelCase : Optional[Any] = num_heads
__lowerCamelCase : Any = kernel_size
__lowerCamelCase : Optional[Any] = mlp_ratio
__lowerCamelCase : List[Any] = qkv_bias
__lowerCamelCase : int = hidden_dropout_prob
__lowerCamelCase : List[str] = attention_probs_dropout_prob
__lowerCamelCase : Tuple = drop_path_rate
__lowerCamelCase : Optional[Any] = hidden_act
__lowerCamelCase : Optional[Any] = layer_norm_eps
__lowerCamelCase : Union[str, Any] = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__lowerCamelCase : Optional[Any] = int(embed_dim * 2 ** (len(_lowerCamelCase ) - 1) )
__lowerCamelCase : List[str] = layer_scale_init_value
__lowerCamelCase : List[Any] = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 , len(_lowerCamelCase ) + 1 )]
__lowerCamelCase , __lowerCamelCase : Tuple = get_aligned_output_features_output_indices(
out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names )
| 519
|
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
__UpperCamelCase : str = logging.getLogger(__name__)
class _UpperCamelCase ( A ):
'''simple docstring'''
def __init__( self : List[str] , _lowerCamelCase : List[Any]=-1 ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = label_idx
def _snake_case ( self : Optional[Any] , _lowerCamelCase : Any , _lowerCamelCase : Union[Split, str] ):
'''simple docstring'''
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__lowerCamelCase : Dict = mode.value
__lowerCamelCase : Tuple = os.path.join(_lowerCamelCase , F"""{mode}.txt""" )
__lowerCamelCase : Optional[Any] = 1
__lowerCamelCase : List[str] = []
with open(_lowerCamelCase , encoding="""utf-8""" ) as f:
__lowerCamelCase : List[Any] = []
__lowerCamelCase : Optional[int] = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=_lowerCamelCase , labels=_lowerCamelCase ) )
guid_index += 1
__lowerCamelCase : Optional[Any] = []
__lowerCamelCase : Tuple = []
else:
__lowerCamelCase : Dict = line.split(""" """ )
words.append(splits[0] )
if len(_lowerCamelCase ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=_lowerCamelCase , labels=_lowerCamelCase ) )
return examples
def _snake_case ( self : str , _lowerCamelCase : TextIO , _lowerCamelCase : TextIO , _lowerCamelCase : List ):
'''simple docstring'''
__lowerCamelCase : str = 0
for line in test_input_reader:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
writer.write(_lowerCamelCase )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
__lowerCamelCase : str = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
writer.write(_lowerCamelCase )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] )
def _snake_case ( self : List[str] , _lowerCamelCase : str ):
'''simple docstring'''
if path:
with open(_lowerCamelCase , """r""" ) as f:
__lowerCamelCase : Dict = f.read().splitlines()
if "O" not in labels:
__lowerCamelCase : int = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class _UpperCamelCase ( A ):
'''simple docstring'''
def __init__( self : str ):
'''simple docstring'''
super().__init__(label_idx=-2 )
def _snake_case ( self : Tuple , _lowerCamelCase : str ):
'''simple docstring'''
if path:
with open(_lowerCamelCase , """r""" ) as f:
__lowerCamelCase : List[str] = f.read().splitlines()
if "O" not in labels:
__lowerCamelCase : Tuple = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class _UpperCamelCase ( A ):
'''simple docstring'''
def _snake_case ( self : List[str] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[Split, str] ):
'''simple docstring'''
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__lowerCamelCase : str = mode.value
__lowerCamelCase : List[str] = os.path.join(_lowerCamelCase , F"""{mode}.txt""" )
__lowerCamelCase : str = 1
__lowerCamelCase : Dict = []
with open(_lowerCamelCase , encoding="""utf-8""" ) as f:
for sentence in parse_incr(_lowerCamelCase ):
__lowerCamelCase : Union[str, Any] = []
__lowerCamelCase : int = []
for token in sentence:
words.append(token["""form"""] )
labels.append(token["""upos"""] )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
if words:
examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=_lowerCamelCase , labels=_lowerCamelCase ) )
guid_index += 1
return examples
def _snake_case ( self : Any , _lowerCamelCase : TextIO , _lowerCamelCase : TextIO , _lowerCamelCase : List ):
'''simple docstring'''
__lowerCamelCase : Dict = 0
for sentence in parse_incr(_lowerCamelCase ):
__lowerCamelCase : List[str] = preds_list[example_id]
__lowerCamelCase : Tuple = """"""
for token in sentence:
out += F"""{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) """
out += "\n"
writer.write(_lowerCamelCase )
example_id += 1
def _snake_case ( self : Optional[int] , _lowerCamelCase : str ):
'''simple docstring'''
if path:
with open(_lowerCamelCase , """r""" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 519
| 1
|
import math
def lowercase_ ( _UpperCamelCase ):
'''simple docstring'''
__lowercase = []
__lowercase = 2
__lowercase = int(math.sqrt(_lowerCamelCase ) ) # Size of every segment
__lowercase = [True] * (end + 1)
__lowercase = []
while start <= end:
if temp[start] is True:
in_prime.append(_lowerCamelCase )
for i in range(start * start , end + 1 , _lowerCamelCase ):
__lowercase = False
start += 1
prime += in_prime
__lowercase = end + 1
__lowercase = min(2 * end , _lowerCamelCase )
while low <= n:
__lowercase = [True] * (high - low + 1)
for each in in_prime:
__lowercase = math.floor(low / each ) * each
if t < low:
t += each
for j in range(_lowerCamelCase , high + 1 , _lowerCamelCase ):
__lowercase = False
for j in range(len(_lowerCamelCase ) ):
if temp[j] is True:
prime.append(j + low )
__lowercase = high + 1
__lowercase = min(high + end , _lowerCamelCase )
return prime
print(sieve(10**6))
| 700
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : Union[str, Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
a : Union[str, Any] = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
a : Dict = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowercase_ ( ):
'''simple docstring'''
__lowercase = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
__lowercase = bs[:]
__lowercase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_UpperCamelCase )
cs.append(2**8 + n )
n += 1
__lowercase = [chr(_UpperCamelCase ) for n in cs]
return dict(zip(_UpperCamelCase , _UpperCamelCase ) )
def lowercase_ ( _UpperCamelCase ):
'''simple docstring'''
__lowercase = set()
__lowercase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowercase = char
return pairs
class lowerCamelCase_ ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self , snake_case_ , snake_case_ , snake_case_="replace" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=False , **snake_case_ , ) -> Optional[int]:
'''simple docstring'''
__lowercase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else bos_token
__lowercase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else eos_token
__lowercase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else sep_token
__lowercase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else cls_token
__lowercase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else unk_token
__lowercase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__lowercase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token
super().__init__(
errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , **snake_case_ , )
with open(snake_case_ , encoding='''utf-8''' ) as vocab_handle:
__lowercase = json.load(snake_case_ )
__lowercase = {v: k for k, v in self.encoder.items()}
__lowercase = errors # how to handle errors in decoding
__lowercase = bytes_to_unicode()
__lowercase = {v: k for k, v in self.byte_encoder.items()}
with open(snake_case_ , encoding='''utf-8''' ) as merges_handle:
__lowercase = merges_handle.read().split('''\n''' )[1:-1]
__lowercase = [tuple(merge.split() ) for merge in bpe_merges]
__lowercase = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
__lowercase = {}
__lowercase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__lowercase = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def A ( self ) -> List[str]:
'''simple docstring'''
return len(self.encoder )
def A ( self ) -> Union[str, Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def A ( self , snake_case_ ) -> List[str]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
__lowercase = tuple(snake_case_ )
__lowercase = get_pairs(snake_case_ )
if not pairs:
return token
while True:
__lowercase = min(snake_case_ , key=lambda snake_case_ : self.bpe_ranks.get(snake_case_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
__lowercase , __lowercase = bigram
__lowercase = []
__lowercase = 0
while i < len(snake_case_ ):
try:
__lowercase = word.index(snake_case_ , snake_case_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowercase = j
if word[i] == first and i < len(snake_case_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowercase = tuple(snake_case_ )
__lowercase = new_word
if len(snake_case_ ) == 1:
break
else:
__lowercase = get_pairs(snake_case_ )
__lowercase = ''' '''.join(snake_case_ )
__lowercase = word
return word
def A ( self , snake_case_ ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = []
for token in re.findall(self.pat , snake_case_ ):
__lowercase = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(snake_case_ ).split(''' ''' ) )
return bpe_tokens
def A ( self , snake_case_ ) -> Any:
'''simple docstring'''
return self.encoder.get(snake_case_ , self.encoder.get(self.unk_token ) )
def A ( self , snake_case_ ) -> Dict:
'''simple docstring'''
return self.decoder.get(snake_case_ )
def A ( self , snake_case_ ) -> List[str]:
'''simple docstring'''
__lowercase = ''''''.join(snake_case_ )
__lowercase = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def A ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(snake_case_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__lowercase = os.path.join(
snake_case_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
__lowercase = os.path.join(
snake_case_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case_ , ensure_ascii=snake_case_ ) + '''\n''' )
__lowercase = 0
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda snake_case_ : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''' )
__lowercase = token_index
writer.write(''' '''.join(snake_case_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def A ( self , snake_case_ , snake_case_ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowercase = [self.cls_token_id]
__lowercase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A ( self , snake_case_ , snake_case_ = None , snake_case_ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case_ )) + [1]
return [1] + ([0] * len(snake_case_ )) + [1, 1] + ([0] * len(snake_case_ )) + [1]
def A ( self , snake_case_ , snake_case_ = None ) -> List[int]:
'''simple docstring'''
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A ( self , snake_case_ , snake_case_=False , **snake_case_ ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(snake_case_ ) > 0 and not text[0].isspace()):
__lowercase = ''' ''' + text
return (text, kwargs)
| 527
| 0
|
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ : Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = XGLMTokenizer
UpperCAmelCase__ = XGLMTokenizerFast
UpperCAmelCase__ = True
UpperCAmelCase__ = True
def snake_case__ ( self : List[str] ) ->Optional[int]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCamelCase : List[Any] = XGLMTokenizer(lowercase__ , keep_accents=lowercase__ )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self : int ) ->Tuple:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = "<pad>"
_UpperCamelCase : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) , lowercase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) , lowercase__ )
def snake_case__ ( self : List[str] ) ->Tuple:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(len(lowercase__ ) , 1_008 )
def snake_case__ ( self : Tuple ) ->List[Any]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )
def snake_case__ ( self : List[str] ) ->Dict:
'''simple docstring'''
_UpperCamelCase : Any = XGLMTokenizer(lowercase__ , keep_accents=lowercase__ )
_UpperCamelCase : Tuple = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowercase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_UpperCamelCase : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowercase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_UpperCamelCase : int = tokenizer.convert_tokens_to_ids(lowercase__ )
self.assertListEqual(
lowercase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_UpperCamelCase : Dict = tokenizer.convert_ids_to_tokens(lowercase__ )
self.assertListEqual(
lowercase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def snake_case__ ( self : int ) ->Dict:
'''simple docstring'''
return XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
def snake_case__ ( self : Tuple ) ->Any:
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowercase__ , f.name )
_UpperCamelCase : List[str] = XGLMTokenizer(f.name , keep_accents=lowercase__ )
_UpperCamelCase : Optional[Any] = pickle.dumps(lowercase__ )
pickle.loads(lowercase__ )
def snake_case__ ( self : Tuple ) ->Dict:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_UpperCamelCase : List[Any] = self.get_tokenizer()
_UpperCamelCase : List[Any] = self.get_rust_tokenizer()
_UpperCamelCase : Tuple = "I was born in 92000, and this is falsé."
_UpperCamelCase : Union[str, Any] = tokenizer.tokenize(lowercase__ )
_UpperCamelCase : Optional[Any] = rust_tokenizer.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
_UpperCamelCase : List[Any] = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ )
_UpperCamelCase : Union[str, Any] = rust_tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
_UpperCamelCase : Tuple = self.get_rust_tokenizer()
_UpperCamelCase : List[str] = tokenizer.encode(lowercase__ )
_UpperCamelCase : Union[str, Any] = rust_tokenizer.encode(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
@slow
def snake_case__ ( self : Any ) ->List[Any]:
'''simple docstring'''
_UpperCamelCase : List[str] = "Hello World!"
_UpperCamelCase : List[Any] = [2, 31_227, 4_447, 35]
self.assertListEqual(lowercase__ , self.big_tokenizer.encode(lowercase__ ) )
@slow
def snake_case__ ( self : Tuple ) ->Union[str, Any]:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
)
# fmt: off
_UpperCamelCase : List[str] = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(lowercase__ , self.big_tokenizer.encode(lowercase__ ) )
@slow
def snake_case__ ( self : List[Any] ) ->Dict:
'''simple docstring'''
_UpperCamelCase : Any = {
"input_ids": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase__ , model_name="facebook/xglm-564M" , padding=lowercase__ , )
| 435
|
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowerCAmelCase_ : List[str] = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class SCREAMING_SNAKE_CASE ( unittest.TestCase , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def snake_case__ ( self : Tuple ) ->Union[str, Any]:
'''simple docstring'''
_UpperCamelCase : str = load_tool("text-question-answering" )
self.tool.setup()
_UpperCamelCase : str = load_tool("text-question-answering" , remote=lowercase__ )
def snake_case__ ( self : List[Any] ) ->Tuple:
'''simple docstring'''
_UpperCamelCase : int = self.tool(lowercase__ , "What did Hugging Face do in April 2021?" )
self.assertEqual(lowercase__ , "launched the BigScience Research Workshop" )
def snake_case__ ( self : List[str] ) ->Dict:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = self.remote_tool(lowercase__ , "What did Hugging Face do in April 2021?" )
self.assertEqual(lowercase__ , "launched the BigScience Research Workshop" )
def snake_case__ ( self : Union[str, Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase : List[Any] = self.tool(text=lowercase__ , question="What did Hugging Face do in April 2021?" )
self.assertEqual(lowercase__ , "launched the BigScience Research Workshop" )
def snake_case__ ( self : Any ) ->str:
'''simple docstring'''
_UpperCamelCase : List[str] = self.remote_tool(text=lowercase__ , question="What did Hugging Face do in April 2021?" )
self.assertEqual(lowercase__ , "launched the BigScience Research Workshop" )
| 435
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case__ ( UpperCamelCase_ , unittest.TestCase ):
_lowerCAmelCase =BioGptTokenizer
_lowerCAmelCase =False
def UpperCAmelCase__ ( self : Any ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case__ : str = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
snake_case__ : List[Any] = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
snake_case__ : List[Any] = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
snake_case__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
snake_case__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(UpperCAmelCase__ ) )
def UpperCAmelCase__ ( self : List[str] , _lowerCamelCase : Optional[Any] ):
snake_case__ : Tuple = 'lower newer'
snake_case__ : Any = 'lower newer'
return input_text, output_text
def UpperCAmelCase__ ( self : Tuple ):
snake_case__ : str = BioGptTokenizer(self.vocab_file , self.merges_file )
snake_case__ : int = 'lower'
snake_case__ : Optional[int] = ['low', 'er</w>']
snake_case__ : Optional[Any] = tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
snake_case__ : Dict = tokens + ['<unk>']
snake_case__ : List[str] = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )
@slow
def UpperCAmelCase__ ( self : Any ):
snake_case__ : int = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
snake_case__ : Union[str, Any] = tokenizer.encode('sequence builders' , add_special_tokens=UpperCAmelCase__ )
snake_case__ : Optional[int] = tokenizer.encode('multi-sequence build' , add_special_tokens=UpperCAmelCase__ )
snake_case__ : str = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
snake_case__ : str = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 721
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase : Any = {'configuration_mmbt': ['MMBTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[int] = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
lowerCamelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 303
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.