| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82-53.2k | int64 0-721 | stringlengths 91-41.9k | int64 0-699 | int64 0-1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
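
# Hedged follow-up sketch (not part of the original script): a hypothetical helper showing how
# the folder dumped above could be loaded back through the public Transformers API and used for
# a greedy CTC decode. The audio here is one second of silence standing in for real speech.
def _example_load_converted(pytorch_dump_folder_path):
    from transformers import HubertForCTC, Wav2Vec2Processor

    processor = Wav2Vec2Processor.from_pretrained(pytorch_dump_folder_path)
    model = HubertForCTC.from_pretrained(pytorch_dump_folder_path).eval()
    # 16 000 zero samples == 1 second of silence at 16 kHz.
    inputs = processor(torch.zeros(16000).numpy(), sampling_rate=16000, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return processor.batch_decode(torch.argmax(logits, dim=-1))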
| 573
|
"""simple docstring"""
from __future__ import annotations
class IIRFilter:
    """N-order IIR filter implemented as a direct-form difference equation."""

    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            error_msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(error_msg)

        if len(b_coeffs) != self.order + 1:
            error_msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(error_msg)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        # Shift the histories and store the newest input/output at index 0.
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result

        return result
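
# Minimal usage sketch (added for illustration; not part of the original module). The
# coefficients below are arbitrary example values, chosen so the output is easy to check
# by hand.
if __name__ == "__main__":
    smoother = IIRFilter(1)
    # y[n] = 0.5 * x[n] + 0.5 * x[n-1]  (a_1 = 0, so there is no feedback term)
    smoother.set_coefficients([1.0, 0.0], [0.5, 0.5])
    print([round(smoother.process(x), 3) for x in [0.0, 1.0, 2.0, 3.0]])  # [0.0, 0.5, 1.5, 2.5]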
| 573
| 1
|
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS, r"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
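
# Illustrative usage sketch (an assumption, not part of the original module): this class is
# normally reached through the `pipeline` factory; the checkpoint name is only an example, and
# `top_k=None` returns the score for every label as described in the docstring above.
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
    print(classifier("This movie was great!", top_k=None))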
| 704
|
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
"""https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"""
)
@is_pipeline_test
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
lowerCAmelCase__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = pipeline(
'''document-question-answering''' , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , '''''' ) ) )
__lowerCamelCase = '''What is the placebo?'''
__lowerCamelCase = [
{
'''image''': load_image(__UpperCAmelCase ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = dqa_pipeline(__UpperCAmelCase , top_k=2 )
self.assertEqual(
__UpperCAmelCase , [
[
{'''score''': ANY(__UpperCAmelCase ), '''answer''': ANY(__UpperCAmelCase ), '''start''': ANY(__UpperCAmelCase ), '''end''': ANY(__UpperCAmelCase )},
{'''score''': ANY(__UpperCAmelCase ), '''answer''': ANY(__UpperCAmelCase ), '''start''': ANY(__UpperCAmelCase ), '''end''': ANY(__UpperCAmelCase )},
]
]
* 3 , )
@require_torch
@require_detectron2
@require_pytesseract
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = '''How many cats are there?'''
__lowerCamelCase = [
{'''score''': 0.0_001, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0_001, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , __UpperCAmelCase )
__lowerCamelCase = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , __UpperCAmelCase )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
__lowerCamelCase = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(__UpperCAmelCase , [] )
# We can optionnally pass directly the words and bounding boxes
__lowerCamelCase = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__lowerCamelCase = []
__lowerCamelCase = []
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , words=__UpperCAmelCase , boxes=__UpperCAmelCase , top_k=2 )
self.assertEqual(__UpperCAmelCase , [] )
@slow
@require_torch
@require_detectron2
@require_pytesseract
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = '''What is the invoice number?'''
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowerCamelCase = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowerCamelCase = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectron2
@require_pytesseract
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = '''What is the invoice number?'''
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowerCamelCase = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowerCamelCase = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=__UpperCAmelCase )
__lowerCamelCase = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=__UpperCAmelCase , revision='''3dc6de3''' , )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = '''What is the invoice number?'''
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__lowerCamelCase = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__lowerCamelCase = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.4_251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
__lowerCamelCase = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , '''''' ) ) )
# This model should also work if `image` is set to None
__lowerCamelCase = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=__UpperCAmelCase )
__lowerCamelCase = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=__UpperCAmelCase , revision='''3dc6de3''' , max_seq_len=50 , )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = '''What is the invoice number?'''
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowerCamelCase = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
__lowerCamelCase = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , '''''' ) ) )
# This model should also work if `image` is set to None
__lowerCamelCase = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = '''What is the invoice number?'''
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
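
# Hedged usage sketch (not part of the original test file): outside of unittest the same task is
# reached through the `pipeline` factory; the checkpoint is one of the models exercised above and
# OCR requires pytesseract/Pillow to be installed.
if __name__ == "__main__":
    dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
    print(dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=1))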
| 622
| 0
|
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
'''cross_validation.py''',
'''gradient_accumulation.py''',
'''local_sgd.py''',
'''multi_process_metrics.py''',
'''memory.py''',
'''automatic_gradient_accumulation.py''',
'''fsdp_with_peak_mem_tracking.py''',
'''deepspeed_with_config_support.py''',
'''megatron_lm_gpt_pretraining.py''',
]
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None ) -> Any:
lowerCAmelCase__ = None
lowerCAmelCase__ = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
lowerCAmelCase__ = os.path.abspath('''examples''' )
for item in os.listdir(lowerCamelCase_ ):
if item not in EXCLUDE_EXAMPLES:
lowerCAmelCase__ = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
if os.path.isfile(lowerCamelCase_ ) and ".py" in item_path:
with self.subTest(
tested_script=lowerCamelCase_ , feature_script=lowerCamelCase_ , tested_section='''main()''' if parser_only else '''training_function()''' , ):
lowerCAmelCase__ = compare_against_test(
os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = '''\n'''.join(lowerCamelCase_ )
if special_strings is not None:
for string in special_strings:
lowerCAmelCase__ = diff.replace(lowerCamelCase_ , '''''' )
self.assertEqual(lowerCamelCase_ , '''''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
self.one_complete_example('''complete_nlp_example.py''' , lowerCamelCase_ )
self.one_complete_example('''complete_nlp_example.py''' , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
lowerCAmelCase__ = [
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
self.one_complete_example('''complete_cv_example.py''' , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False
@classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
lowerCAmelCase__ = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
""".split()
lowerCAmelCase__ = run_command(self._launch_args + testargs , return_stdout=lowerCamelCase_ )
self.assertNotIn('''epoch 0:''' , lowerCamelCase_ )
self.assertIn('''epoch 1:''' , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
""".split()
lowerCAmelCase__ = run_command(self._launch_args + testargs , return_stdout=lowerCamelCase_ )
if torch.cuda.is_available():
lowerCAmelCase__ = torch.cuda.device_count()
else:
lowerCAmelCase__ = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , lowerCamelCase_ )
self.assertIn('''epoch 1:''' , lowerCamelCase_ )
else:
self.assertIn('''epoch 0:''' , lowerCamelCase_ )
self.assertIn('''epoch 1:''' , lowerCamelCase_ )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
lowerCAmelCase__ = run_command(self._launch_args + testargs , return_stdout=lowerCamelCase_ )
lowerCAmelCase__ = re.findall('''({.+})''' , lowerCamelCase_ )
lowerCAmelCase__ = [r for r in results if '''accuracy''' in r][-1]
lowerCAmelCase__ = ast.literal_eval(lowerCamelCase_ )
self.assertGreaterEqual(results['''accuracy'''] , 0.75 )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
lowerCAmelCase__ = F"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase_ , '''tracking''' ) ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 90
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
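
# Minimal illustrative sketch (assumed, not from the original file): building a config with
# two-stage decoding enabled, which the constructor above requires to be paired with
# `with_box_refine=True`; `attribute_map` exposes d_model and encoder_attention_heads under
# the standard names.
if __name__ == "__main__":
    cfg = DeformableDetrConfig(two_stage=True, with_box_refine=True, num_queries=100)
    print(cfg.hidden_size, cfg.num_attention_heads)  # 256 8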
| 392
| 0
|
'''simple docstring'''
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """Miller-Rabin probabilistic primality test with `prec` random rounds."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d exact for modular exponentiation
        exp += 1

    # n - 1 = d * (2 ** exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 427
|
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class a_ ( unittest.TestCase ):
def lowercase__ ( self : List[str] ):
__snake_case = 0
@slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)
def lowercase__ ( self : Optional[int] ):
__snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def lowercase__ ( self : Tuple ):
__snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 2_0 )
def lowercase__ ( self : Any ):
__snake_case = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
# Check that tokenizer_type ≠ model_type
__snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase , config=__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def lowercase__ ( self : Tuple ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(__lowerCAmelCase , 'vocab.txt' ) )
__snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase , tokenizer_type='bert' , use_fast=__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(__lowerCAmelCase , 'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(__lowerCAmelCase , 'merges.txt' ) )
__snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase , tokenizer_type='gpt2' , use_fast=__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
@require_tokenizers
def lowercase__ ( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(__lowerCAmelCase , 'vocab.txt' ) )
__snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase , tokenizer_type='bert' )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(__lowerCAmelCase , 'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(__lowerCAmelCase , 'merges.txt' ) )
__snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase , tokenizer_type='gpt2' )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
def lowercase__ ( self : int ):
with pytest.raises(__lowerCAmelCase ):
AutoTokenizer.from_pretrained('./' , tokenizer_type='xxx' )
@require_tokenizers
def lowercase__ ( self : Union[str, Any] ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
__snake_case = tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased' )
self.assertIsInstance(__lowerCAmelCase , (BertTokenizer, BertTokenizerFast) )
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __lowerCAmelCase )
else:
self.assertEqual(tokenizer.do_lower_case , __lowerCAmelCase )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
@require_tokenizers
def lowercase__ ( self : str ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
__lowerCAmelCase , 'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier' , ):
__snake_case = tokenizer_class.from_pretrained('julien-c/herlolip-not-exists' )
def lowercase__ ( self : Any ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
__snake_case = TOKENIZER_MAPPING.values()
__snake_case = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(__lowerCAmelCase )
@require_tokenizers
def lowercase__ ( self : List[str] ):
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=__lowerCAmelCase ) , __lowerCAmelCase )
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' ) , __lowerCAmelCase )
@require_tokenizers
def lowercase__ ( self : Optional[int] ):
__snake_case = AutoTokenizer.from_pretrained('distilbert-base-uncased' , do_lower_case=__lowerCAmelCase )
__snake_case = 'Hello, world. How are you?'
__snake_case = tokenizer.tokenize(__lowerCAmelCase )
self.assertEqual('[UNK]' , tokens[0] )
__snake_case = AutoTokenizer.from_pretrained('microsoft/mpnet-base' , do_lower_case=__lowerCAmelCase )
__snake_case = tokenizer.tokenize(__lowerCAmelCase )
self.assertEqual('[UNK]' , tokens[0] )
@require_tokenizers
def lowercase__ ( self : Optional[Any] ):
__snake_case = AutoTokenizer.from_pretrained('robot-test/dummy-tokenizer-fast-with-model-config' )
self.assertEqual(type(__lowerCAmelCase ) , __lowerCAmelCase )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 )
self.assertEqual(tokenizer.unk_token , '[UNK]' )
self.assertEqual(tokenizer.padding_side , 'right' )
self.assertEqual(tokenizer.truncation_side , 'right' )
def lowercase__ ( self : List[str] ):
__snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCAmelCase )
__snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 1_2 )
def lowercase__ ( self : Tuple ):
__snake_case = AutoTokenizer.from_pretrained('ctrl' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
def lowercase__ ( self : Dict ):
# Check we can load the tokenizer config of an online model.
__snake_case = get_tokenizer_config('bert-base-cased' )
__snake_case = config.pop('_commit_hash' , __lowerCAmelCase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(__lowerCAmelCase , {'do_lower_case': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
__snake_case = get_tokenizer_config(__lowerCAmelCase )
self.assertDictEqual(__lowerCAmelCase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
__snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCAmelCase )
__snake_case = get_tokenizer_config(__lowerCAmelCase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['tokenizer_class'] , 'BertTokenizer' )
def lowercase__ ( self : List[str] ):
try:
AutoConfig.register('custom' , __lowerCAmelCase )
AutoTokenizer.register(__lowerCAmelCase , slow_tokenizer_class=__lowerCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCAmelCase ):
AutoTokenizer.register(__lowerCAmelCase , slow_tokenizer_class=__lowerCAmelCase )
__snake_case = CustomTokenizer.from_pretrained(__lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCAmelCase )
__snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowercase__ ( self : Union[str, Any] ):
try:
AutoConfig.register('custom' , __lowerCAmelCase )
# Can register in two steps
AutoTokenizer.register(__lowerCAmelCase , slow_tokenizer_class=__lowerCAmelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(__lowerCAmelCase , fast_tokenizer_class=__lowerCAmelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
__lowerCAmelCase , slow_tokenizer_class=__lowerCAmelCase , fast_tokenizer_class=__lowerCAmelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCAmelCase ):
AutoTokenizer.register(__lowerCAmelCase , fast_tokenizer_class=__lowerCAmelCase )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case = BertTokenizerFast.from_pretrained(__lowerCAmelCase )
bert_tokenizer.save_pretrained(__lowerCAmelCase )
__snake_case = CustomTokenizerFast.from_pretrained(__lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCAmelCase )
__snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
__snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase , use_fast=__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase__ ( self : Dict ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__lowerCAmelCase ):
__snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowerCAmelCase ):
__snake_case = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__lowerCAmelCase )
__snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__lowerCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCAmelCase )
__snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase , trust_remote_code=__lowerCAmelCase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
__snake_case = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__lowerCAmelCase , use_fast=__lowerCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCAmelCase )
__snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase , trust_remote_code=__lowerCAmelCase , use_fast=__lowerCAmelCase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer' )
@require_tokenizers
def lowercase__ ( self : str ):
class a_ ( UpperCAmelCase__ ):
lowercase_ : str = False
class a_ ( UpperCAmelCase__ ):
lowercase_ : Optional[Any] = NewTokenizer
lowercase_ : str = False
try:
AutoConfig.register('custom' , __lowerCAmelCase )
AutoTokenizer.register(__lowerCAmelCase , slow_tokenizer_class=__lowerCAmelCase )
AutoTokenizer.register(__lowerCAmelCase , fast_tokenizer_class=__lowerCAmelCase )
# If remote code is not set, the default is to use local
__snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertFalse(tokenizer.special_attribute_present )
__snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , use_fast=__lowerCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
__snake_case = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__lowerCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertFalse(tokenizer.special_attribute_present )
__snake_case = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__lowerCAmelCase , use_fast=__lowerCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
__snake_case = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__lowerCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertTrue(tokenizer.special_attribute_present )
__snake_case = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__lowerCAmelCase , use_fast=__lowerCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase__ ( self : Optional[Any] ):
__snake_case = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=__lowerCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
__snake_case = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=__lowerCAmelCase , use_fast=__lowerCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
def lowercase__ ( self : Union[str, Any] ):
with self.assertRaisesRegex(
__lowerCAmelCase , 'bert-base is not a local folder and is not a valid model identifier' ):
__snake_case = AutoTokenizer.from_pretrained('bert-base' )
def lowercase__ ( self : str ):
with self.assertRaisesRegex(
__lowerCAmelCase , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
__snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase , revision='aaaaaa' )
def test_cached_tokenizer_has_minimum_calls_to_head(self):
    # Make sure we have cached the tokenizer.
    _ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
    with RequestCounter() as counter:
        _ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
    self.assertEqual(counter.get_request_count , 0 )
    self.assertEqual(counter.head_request_count , 1 )
    self.assertEqual(counter.other_request_count , 0 )
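# Illustrative sketch (not from the original test module): the registration pattern the
# tests above exercise, using hypothetical placeholder classes `MyCustomConfig` and
# `MyCustomTokenizer`.
def _example_register_custom_tokenizer():
    from transformers import AutoConfig, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer

    class MyCustomConfig(PretrainedConfig):
        model_type = "my-custom-model"

    class MyCustomTokenizer(PreTrainedTokenizer):
        pass

    # Once a config type and a tokenizer class are registered, AutoTokenizer can resolve
    # checkpoints whose config declares model_type == "my-custom-model".
    AutoConfig.register("my-custom-model", MyCustomConfig)
    AutoTokenizer.register(MyCustomConfig, slow_tokenizer_class=MyCustomTokenizer)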
| 427
| 1
|
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_50, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_00, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_00, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
] )
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def setUp(self):
    if self.framework == "pytorch":
        subprocess.run(
            f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True,
        )
    assert hasattr(self, "env")
def create_estimator(self, instance_count):
    job_name = f'''{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'''
    # distributed data settings
    distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
    # creates estimator
    return HuggingFace(
        entry_point=self.script,
        source_dir=self.env.test_path,
        role=self.env.role,
        image_uri=self.env.image_uri,
        base_job_name=job_name,
        instance_count=instance_count,
        instance_type=self.instance_type,
        debugger_hook_config=False,
        hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
        metric_definitions=self.env.metric_definitions,
        distribution=distribution,
        py_version="py36",
    )

def save_results_as_csv(self, job_name):
    TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
@parameterized.expand([(2,)] )
def test_script(self, instance_count):
    # create estimator
    estimator = self.create_estimator(instance_count)
    # run training
    estimator.fit()
    # result dataframe
    result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
    # extract kpis
    eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
    eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
    # get train time from SageMaker job, this includes starting, preprocessing, stopping
    train_runtime = (
        Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
    )
    # assert kpis
    assert train_runtime <= self.results["train_runtime"]
    assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
    assert all(t <= self.results["eval_loss"] for t in eval_loss)
    # dump tests result into json file to share in PR
    with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
        json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 675
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""junnyu/roformer_chinese_small""": 1536,
"""junnyu/roformer_chinese_base""": 1536,
"""junnyu/roformer_chinese_char_small""": 512,
"""junnyu/roformer_chinese_char_base""": 512,
"""junnyu/roformer_small_discriminator""": 128,
"""junnyu/roformer_small_generator""": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
        self.do_lower_case = do_lower_case
    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
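# Illustrative usage sketch (not from the original module). The checkpoint is one of the
# entries listed in PRETRAINED_VOCAB_FILES_MAP above; the save path is a placeholder.
def _example_roformer_fast_tokenizer_usage():
    tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
    encoding = tokenizer("今天天气非常好。", return_tensors="pt")
    # On save, the custom Jieba pre-tokenizer is swapped for BertPreTokenizer because a
    # PreTokenizer.custom(...) instance cannot be serialized into tokenizer.json.
    tokenizer.save_pretrained("./roformer-tokenizer")
    return encoding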
| 675
| 1
|
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
def _lowercase ( self : Tuple ) ->Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Dict = image_processing(UpperCAmelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def _lowercase ( self : int ) ->Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Dict = image_processing(UpperCAmelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def _lowercase ( self : Optional[int] ) ->str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(UpperCAmelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
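# Illustrative sketch (not from the original test module): the end-user path the tests
# above exercise; the size override and dummy image are assumptions.
def _example_dpt_image_processing():
    from PIL import Image
    from transformers import DPTImageProcessor

    image_processor = DPTImageProcessor(size={"height": 384, "width": 384})
    image = Image.new("RGB", (640, 480))
    # Returns a BatchFeature whose `pixel_values` has shape (batch, channels, height, width).
    inputs = image_processor(images=image, return_tensors="pt")
    return inputs["pixel_values"].shape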
| 446
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
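# Illustrative sketch (not from the original module): overriding a few of the defaults
# defined in __init__ above; the chosen values are arbitrary examples.
def _example_gpt_neox_japanese_config():
    config = GPTNeoXJapaneseConfig(hidden_size=1024, num_hidden_layers=16, num_attention_heads=16)
    # Unspecified fields keep their defaults (e.g. vocab_size=32000, hidden_act="gelu").
    return config.to_dict()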
| 446
| 1
|
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
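# Worked example added for illustration (not part of the original module): at 273.15 K,
# one mole of an ideal gas occupying 0.022414 m^3 exerts P = nRT / V ≈ 101325 Pa (1 atm).
def _example_ideal_gas_pressure() -> float:
    return pressure_of_gas_system(1.0, 273.15, 0.022414)  # ≈ 1.013e5 Pa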
| 650
|
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Assume the input is already a dict (or a list of dicts) with "image" and "question" keys.
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation)
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 650
| 1
|
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : Any = None
if self.use_labels:
snake_case_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : Tuple = self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase ( self ) -> Tuple:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
snake_case_ : Optional[int] = TFViTMAEModel(config=_SCREAMING_SNAKE_CASE )
snake_case_ : List[Any] = model(_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
snake_case_ : Dict = TFViTMAEForPreTraining(_SCREAMING_SNAKE_CASE )
snake_case_ : str = model(_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
# expected sequence length = num_patches
snake_case_ : int = (self.image_size // self.patch_size) ** 2
snake_case_ : Optional[Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
snake_case_ : Optional[Any] = 1
snake_case_ : Union[str, Any] = TFViTMAEForPreTraining(_SCREAMING_SNAKE_CASE )
snake_case_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ : Optional[int] = model(_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
snake_case_ : int = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def _lowerCAmelCase ( self ) -> List[Any]:
snake_case_ : Tuple = self.prepare_config_and_inputs()
((snake_case_) , (snake_case_) , (snake_case_)) : Dict = config_and_inputs
snake_case_ : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
A : str = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
A : List[Any] = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
A : Dict = False
A : Union[str, Any] = False
A : Tuple = False
A : Optional[int] = False
def _lowerCAmelCase ( self ) -> Optional[Any]:
snake_case_ : Optional[int] = TFViTMAEModelTester(self )
snake_case_ : List[str] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
pass
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ , snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : str = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
snake_case_ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , tf.keras.layers.Layer ) )
def _lowerCAmelCase ( self ) -> List[str]:
snake_case_ , snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Optional[int] = model_class(_SCREAMING_SNAKE_CASE )
snake_case_ : int = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : Dict = [*signature.parameters.keys()]
snake_case_ : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Dict:
snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Optional[int]:
snake_case_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> List[Any]:
# make the mask reproducible
np.random.seed(2 )
snake_case_ , snake_case_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : str = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case_ : Any = model_class(_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ : str = model(_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = copy.deepcopy(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
snake_case_ : Optional[Any] = model(**_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[int] = outputs_dict[0].numpy()
snake_case_ : Any = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def _lowerCAmelCase ( self ) -> Any:
# make the mask reproducible
np.random.seed(2 )
snake_case_ , snake_case_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Tuple = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(_SCREAMING_SNAKE_CASE ):
snake_case_ : Union[str, Any] = {}
for k, v in inputs_dict.items():
if tf.is_tensor(_SCREAMING_SNAKE_CASE ):
snake_case_ : int = v.numpy()
else:
snake_case_ : Tuple = np.array(_SCREAMING_SNAKE_CASE )
return inputs_np_dict
for model_class in self.all_model_classes:
snake_case_ : Union[str, Any] = model_class(_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = prepare_numpy_arrays(_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[int] = model(_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = model(**_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE )
self.assert_outputs_same(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
# make masks reproducible
np.random.seed(2 )
snake_case_ : Any = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
snake_case_ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case_ : Union[str, Any] = tf.constant(_SCREAMING_SNAKE_CASE )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
snake_case_ : Tuple = tf_noise
super().check_pt_tf_models(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> int:
# make mask reproducible
np.random.seed(2 )
snake_case_ , snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Union[str, Any] = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(_SCREAMING_SNAKE_CASE )
if module_member_name.endswith("MainLayer" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("MainLayer" )] == model_class.__name__[: -len("Model" )]
for module_member in (getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ),)
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(_SCREAMING_SNAKE_CASE , "_keras_serializable" , _SCREAMING_SNAKE_CASE )
}
snake_case_ : Tuple = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case_ : Dict = tf.convert_to_tensor(_SCREAMING_SNAKE_CASE )
inputs_dict.update({"noise": noise} )
for main_layer_class in tf_main_layer_classes:
snake_case_ : Tuple = main_layer_class(_SCREAMING_SNAKE_CASE )
snake_case_ : List[Any] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
snake_case_ : Optional[int] = tf.keras.Model(_SCREAMING_SNAKE_CASE , outputs=main_layer(_SCREAMING_SNAKE_CASE ) )
snake_case_ : Tuple = model(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : Dict = os.path.join(_SCREAMING_SNAKE_CASE , "keras_model.h5" )
model.save(_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = tf.keras.models.load_model(
_SCREAMING_SNAKE_CASE , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(_SCREAMING_SNAKE_CASE , tf.keras.Model )
snake_case_ : List[str] = model(_SCREAMING_SNAKE_CASE )
self.assert_outputs_same(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def _lowerCAmelCase ( self ) -> List[Any]:
# make mask reproducible
np.random.seed(2 )
snake_case_ , snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case_ : Union[str, Any] = model_class(_SCREAMING_SNAKE_CASE )
snake_case_ : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = model(_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE )
if model_class.__name__ == "TFViTMAEModel":
snake_case_ : Union[str, Any] = outputs.last_hidden_state.numpy()
snake_case_ : Optional[Any] = 0
else:
snake_case_ : Tuple = outputs.logits.numpy()
snake_case_ : Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE , saved_model=_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[int] = model_class.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ : List[str] = model(_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE )
if model_class.__name__ == "TFViTMAEModel":
snake_case_ : str = after_outputs["last_hidden_state"].numpy()
snake_case_ : Optional[int] = 0
else:
snake_case_ : str = after_outputs["logits"].numpy()
snake_case_ : Optional[int] = 0
snake_case_ : List[str] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-5 )
def _lowerCAmelCase ( self ) -> List[Any]:
# make mask reproducible
np.random.seed(2 )
snake_case_ , snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case_ : Dict = model_class(_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE )
snake_case_ : List[str] = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(_SCREAMING_SNAKE_CASE )
snake_case_ : List[Any] = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
snake_case_ : Tuple = model_class.from_config(model.config )
snake_case_ : Optional[int] = new_model(_SCREAMING_SNAKE_CASE ) # Build model
new_model.set_weights(model.get_weights() )
snake_case_ : List[str] = new_model(_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE )
self.assert_outputs_same(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowerCAmelCase ( self ) -> List[str]:
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def _lowerCAmelCase ( self ) -> List[Any]:
pass
@slow
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : Any = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def lowerCAmelCase__ ( ):
snake_case_ : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowerCAmelCase ( self ) -> Tuple:
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def _lowerCAmelCase ( self ) -> Optional[int]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
snake_case_ : List[Any] = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" )
snake_case_ : Union[str, Any] = self.default_image_processor
snake_case_ : List[str] = prepare_img()
snake_case_ : Dict = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="tf" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
snake_case_ : Optional[Any] = ViTMAEConfig()
snake_case_ : Optional[int] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
snake_case_ : str = np.random.uniform(size=(1, num_patches) )
# forward pass
snake_case_ : Any = model(**_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE )
# verify the logits
snake_case_ : Union[str, Any] = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 )
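# Illustrative sketch (not from the original test module): the same inference path as the
# integration test above, outside the unittest harness. The image path is a placeholder.
def _example_tf_vit_mae_inference():
    from PIL import Image
    from transformers import TFViTMAEForPreTraining, ViTImageProcessor

    image_processor = ViTImageProcessor.from_pretrained("facebook/vit-mae-base")
    model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = image_processor(images=image, return_tensors="tf")
    # Logits have shape (batch, num_patches, patch_size**2 * num_channels), here (1, 196, 768).
    outputs = model(**inputs)
    return outputs.logits.shape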
| 114
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_xglm"] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_xglm"] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_xglm"] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 114
| 1
|
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : LRUCache[str | int] = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 46
|
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( __lowercase , unittest.TestCase ):
a__ : Tuple = DDIMPipeline
a__ : List[str] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
a__ : Optional[int] = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""latents""",
"""callback""",
"""callback_steps""",
}
a__ : Optional[int] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
a__ : str = False
def __A ( self : Optional[int] ) -> Any:
torch.manual_seed(0 )
__lowerCamelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
__lowerCamelCase = DDIMScheduler()
__lowerCamelCase = {'''unet''': unet, '''scheduler''': scheduler}
return components
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int=0 ) -> Tuple:
if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ):
__lowerCamelCase = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
__lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __A ( self : Tuple ) -> List[Any]:
__lowerCamelCase = '''cpu'''
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = pipe(**SCREAMING_SNAKE_CASE__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
__lowerCamelCase = np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
__lowerCamelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE__ , 1e-3 )
def __A ( self : Optional[int] ) -> Any:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def __A ( self : Union[str, Any] ) -> Optional[Any]:
super().test_save_load_local(expected_max_difference=3e-3 )
def __A ( self : Optional[int] ) -> List[Any]:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def __A ( self : List[Any] ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def __A ( self : List[Any] ) -> Tuple:
__lowerCamelCase = '''google/ddpm-cifar10-32'''
__lowerCamelCase = UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = DDIMScheduler()
__lowerCamelCase = DDIMPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
ddim.to(SCREAMING_SNAKE_CASE__ )
ddim.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = ddim(generator=SCREAMING_SNAKE_CASE__ , eta=0.0 , output_type='''numpy''' ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self : List[str] ) -> Optional[int]:
__lowerCamelCase = '''google/ddpm-ema-bedroom-256'''
__lowerCamelCase = UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = DDIMScheduler.from_pretrained(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = DDIMPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
ddpm.to(SCREAMING_SNAKE_CASE__ )
ddpm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = ddpm(generator=SCREAMING_SNAKE_CASE__ , output_type='''numpy''' ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
__lowerCamelCase = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
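# Illustrative sketch (not from the original test module): the plain end-user path that the
# slow test above verifies, mirroring its UNet/scheduler wiring.
def _example_ddim_generation():
    import torch
    from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel

    unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
    scheduler = DDIMScheduler()
    pipe = DDIMPipeline(unet=unet, scheduler=scheduler)
    generator = torch.manual_seed(0)
    # eta=0.0 gives deterministic DDIM sampling; images are returned as PIL images by default.
    image = pipe(generator=generator, eta=0.0, num_inference_steps=50).images[0]
    return image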
| 298
| 0
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class a ( unittest.TestCase ):
def __lowerCamelCase ( self :str ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : List[Any] = 1
snake_case__ : Any = 3
snake_case__ : Optional[int] = (3_2, 3_2)
snake_case__ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(lowerCamelCase_ )
return image
@property
def __lowerCamelCase ( self :Union[str, Any] ):
torch.manual_seed(0 )
snake_case__ : str = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=4 ,out_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') ,cross_attention_dim=3_2 ,)
return model
@property
def __lowerCamelCase ( self :Tuple ):
torch.manual_seed(0 )
snake_case__ : Any = AutoencoderKL(
block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,)
return model
@property
def __lowerCamelCase ( self :Any ):
torch.manual_seed(0 )
snake_case__ : Any = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,)
return CLIPTextModel(lowerCamelCase_ )
@property
def __lowerCamelCase ( self :Optional[Any] ):
def extract(*__lowercase :Union[str, Any] ,**__lowercase :List[str] ):
class a :
def __init__( self :Any ):
snake_case__ : Any = torch.ones([0] )
def __lowerCamelCase ( self :Any ,__lowercase :Tuple ):
self.pixel_values.to(lowerCamelCase_ )
return self
return Out()
return extract
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case__ : Any = self.dummy_cond_unet
snake_case__ : int = DDIMScheduler(
beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule='''scaled_linear''' ,clip_sample=lowerCamelCase_ ,set_alpha_to_one=lowerCamelCase_ ,)
snake_case__ : Optional[int] = self.dummy_vae
snake_case__ : Optional[int] = self.dummy_text_encoder
snake_case__ : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
snake_case__ : List[str] = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
snake_case__ : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
snake_case__ : Optional[int] = '''A painting of a squirrel eating a burger'''
snake_case__ : Tuple = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
snake_case__ : List[Any] = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type='''np''' )
snake_case__ : Tuple = output.images
snake_case__ : Tuple = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
snake_case__ : Union[str, Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type='''np''' ,return_dict=lowerCamelCase_ ,)[0]
snake_case__ : Any = image[0, -3:, -3:, -1]
snake_case__ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case__ : Optional[int] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCamelCase ( self :int ):
snake_case__ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case__ : int = self.dummy_cond_unet
snake_case__ : Optional[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
snake_case__ : List[str] = self.dummy_vae
snake_case__ : Optional[int] = self.dummy_text_encoder
snake_case__ : int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
snake_case__ : Optional[int] = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
snake_case__ : List[Any] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
snake_case__ : str = '''A painting of a squirrel eating a burger'''
snake_case__ : Dict = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
snake_case__ : Optional[int] = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type='''np''' )
snake_case__ : List[Any] = output.images
snake_case__ : Any = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
snake_case__ : str = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type='''np''' ,return_dict=lowerCamelCase_ ,)[0]
snake_case__ : Any = image[0, -3:, -3:, -1]
snake_case__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case__ : Optional[int] = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCamelCase ( self :str ):
snake_case__ : str = StableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-lms-pipe''' ,safety_checker=lowerCamelCase_ )
assert isinstance(lowerCamelCase_ ,lowerCamelCase_ )
assert isinstance(pipe.scheduler ,lowerCamelCase_ )
assert pipe.safety_checker is None
snake_case__ : List[str] = pipe('''example prompt''' ,num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
snake_case__ : Dict = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
snake_case__ : List[str] = pipe('''example prompt''' ,num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != '''cuda''' ,'''This test requires a GPU''' )
def __lowerCamelCase ( self :Tuple ):
snake_case__ : Dict = self.dummy_cond_unet
snake_case__ : Tuple = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
snake_case__ : Dict = self.dummy_vae
snake_case__ : List[Any] = self.dummy_text_encoder
snake_case__ : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# put models in fp16
snake_case__ : Dict = unet.half()
snake_case__ : Optional[int] = vae.half()
snake_case__ : Optional[Any] = bert.half()
# make sure here that pndm scheduler skips prk
snake_case__ : List[str] = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
snake_case__ : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
snake_case__ : str = '''A painting of a squirrel eating a burger'''
snake_case__ : Dict = sd_pipe([prompt] ,num_inference_steps=2 ,output_type='''np''' ).images
assert image.shape == (1, 6_4, 6_4, 3)
@nightly
@require_torch_gpu
class a ( unittest.TestCase ):
def __lowerCamelCase ( self :int ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self :List[str] ):
snake_case__ : Tuple = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' ,safety_checker=lowerCamelCase_ )
snake_case__ : Optional[Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
snake_case__ : Any = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
snake_case__ : int = (
'''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'''
''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'''
''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'''
''' children from bahnhof zoo, detailed '''
)
snake_case__ : Union[str, Any] = 4_0_0_3_6_6_0_3_4_6
snake_case__ : Optional[int] = 7
# without safety guidance (sld_guidance_scale = 0)
snake_case__ : Any = torch.manual_seed(lowerCamelCase_ )
snake_case__ : Dict = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=5_0 ,output_type='''np''' ,width=5_1_2 ,height=5_1_2 ,sld_guidance_scale=0 ,)
snake_case__ : Union[str, Any] = output.images
snake_case__ : List[str] = image[0, -3:, -3:, -1]
snake_case__ : Any = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# without safety guidance (strong configuration)
snake_case__ : str = torch.manual_seed(lowerCamelCase_ )
snake_case__ : Union[str, Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=5_0 ,output_type='''np''' ,width=5_1_2 ,height=5_1_2 ,sld_guidance_scale=2_0_0_0 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
snake_case__ : str = output.images
snake_case__ : List[str] = image[0, -3:, -3:, -1]
snake_case__ : Any = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCamelCase ( self :List[Any] ):
snake_case__ : Tuple = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' ,safety_checker=lowerCamelCase_ )
snake_case__ : str = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
snake_case__ : Dict = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
snake_case__ : int = '''padme amidala taking a bath artwork, safe for work, no nudity'''
snake_case__ : List[Any] = 2_7_3_4_9_7_1_7_5_5
snake_case__ : str = 7
snake_case__ : List[str] = torch.manual_seed(lowerCamelCase_ )
snake_case__ : Optional[int] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=5_0 ,output_type='''np''' ,width=5_1_2 ,height=5_1_2 ,sld_guidance_scale=0 ,)
snake_case__ : Optional[int] = output.images
snake_case__ : Dict = image[0, -3:, -3:, -1]
snake_case__ : List[str] = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
snake_case__ : Dict = torch.manual_seed(lowerCamelCase_ )
snake_case__ : Union[str, Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=5_0 ,output_type='''np''' ,width=5_1_2 ,height=5_1_2 ,sld_guidance_scale=2_0_0_0 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
snake_case__ : List[Any] = output.images
snake_case__ : List[str] = image[0, -3:, -3:, -1]
snake_case__ : int = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCamelCase ( self :Any ):
snake_case__ : Union[str, Any] = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' )
snake_case__ : Optional[int] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
snake_case__ : Tuple = (
'''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'''
''' leyendecker'''
)
snake_case__ : List[str] = 1_0_4_4_3_5_5_2_3_4
snake_case__ : Any = 1_2
snake_case__ : int = torch.manual_seed(lowerCamelCase_ )
snake_case__ : Any = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=5_0 ,output_type='''np''' ,width=5_1_2 ,height=5_1_2 ,sld_guidance_scale=0 ,)
snake_case__ : Any = output.images
snake_case__ : List[str] = image[0, -3:, -3:, -1]
snake_case__ : Any = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
snake_case__ : int = torch.manual_seed(lowerCamelCase_ )
snake_case__ : Union[str, Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=5_0 ,output_type='''np''' ,width=5_1_2 ,height=5_1_2 ,sld_guidance_scale=2_0_0_0 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
snake_case__ : Union[str, Any] = output.images
snake_case__ : Dict = image[0, -3:, -3:, -1]
snake_case__ : str = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 718
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single line, padding/truncating it to `max_length`."""
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by `pad_token_id`."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
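# Illustrative usage sketch (not part of the original file): how this dataset and its
# collate_fn are typically wired into a DataLoader. The tokenizer checkpoint and the
# data directory below are assumptions chosen only for demonstration.
def _example_dataloader():
    from torch.utils.data import DataLoader

    tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
    dataset = Seq2SeqDataset(tokenizer, "path/to/data_dir", max_source_length=1024, max_target_length=56)
    return DataLoader(dataset, batch_size=8, collate_fn=dataset.collate_fn)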
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
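# Illustrative usage sketch (not part of the original file): a hand-made check of the
# normalization-based metrics above; the example strings are assumptions.
def _example_metrics():
    # Normalization lowercases and strips punctuation and articles, so these match exactly.
    assert exact_match_score("The Cat sat.", "the cat sat")
    # Token-level F1 after normalization: 3 shared tokens out of 4 on each side -> 0.75.
    return f1_score("the cat sat on the mat", "the cat lay on the mat")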
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
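# Illustrative usage sketch (not part of the original file): set_extra_model_params
# copies hyperparameters onto a config, falling back to `dropout_rate` when the config
# has no `dropout` attribute (the T5 case). The Namespace objects below are stand-ins.
def _example_set_extra_model_params():
    from argparse import Namespace

    hparams = Namespace(dropout=0.1, attention_dropout=0.1)
    config = Namespace(dropout_rate=0.0, attention_dropout=0.0)
    return set_extra_model_params(("dropout", "attention_dropout"), hparams, config)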
| 219
| 0
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is kept in the asdict output so the template serializes to JSON cleanly
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
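# Illustrative usage sketch (not part of the original file): column_mapping tells
# `datasets` how to rename a dataset's columns to the canonical "text"/"summary"
# schema; the column names below are assumptions.
def _example_column_mapping():
    task = Summarization(text_column="article", summary_column="highlights")
    return task.column_mapping  # {"article": "text", "highlights": "summary"}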
| 605
|
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """
    Evaluate a postfix (reverse Polish notation) expression.

    >>> evaluate_postfix(["2", "1", "+", "3", "*"])
    9
    >>> evaluate_postfix(["4", "13", "5", "/", "+"])
    6
    >>> evaluate_postfix([])
    0
    """
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division truncated toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 605
| 1
|
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix="") -> str:
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class A_ ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
_lowercase = torch.rand(12 ,dtype=torch.floataa ) - 0.5
_lowercase = AgentAudio(__A )
_lowercase = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(__A ,agent_type.to_raw() ,atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(__A ) )
# Ensure that the file contains the same value as the original tensor
_lowercase = sf.read(__A )
self.assertTrue(torch.allclose(__A ,torch.tensor(__A ) ,atol=1e-4 ) )
def __UpperCAmelCase ( self : List[str] ) -> str:
_lowercase = torch.rand(12 ,dtype=torch.floataa ) - 0.5
_lowercase = get_new_path(suffix='.wav' )
sf.write(__A ,__A ,1_6000 )
_lowercase = AgentAudio(__A )
self.assertTrue(torch.allclose(__A ,agent_type.to_raw() ,atol=1e-4 ) )
self.assertEqual(agent_type.to_string() ,__A )
@require_vision
@require_torch
class A_ ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Any ) -> Tuple:
_lowercase = torch.randint(0 ,256 ,(64, 64, 3) )
_lowercase = AgentImage(__A )
_lowercase = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(__A ,agent_type._tensor ,atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw() ,Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(__A ) )
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
_lowercase = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / "000000039769.png"
_lowercase = Image.open(__A )
_lowercase = AgentImage(__A )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(__A ) )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
_lowercase = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / "000000039769.png"
_lowercase = Image.open(__A )
_lowercase = AgentImage(__A )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(__A ) )
class A_ ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : List[str] ) -> int:
_lowercase = "Hey!"
_lowercase = AgentText(__A )
self.assertEqual(__A ,agent_type.to_string() )
self.assertEqual(__A ,agent_type.to_raw() )
self.assertEqual(__A ,__A )
| 704
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = '''gptj'''
SCREAMING_SNAKE_CASE_ : int = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : str ,__A : List[str]=5_0400 ,__A : Dict=2048 ,__A : List[str]=4096 ,__A : Dict=28 ,__A : Optional[int]=16 ,__A : Tuple=64 ,__A : Optional[int]=None ,__A : Optional[int]="gelu_new" ,__A : Dict=0.0 ,__A : List[str]=0.0 ,__A : Optional[int]=0.0 ,__A : str=1e-5 ,__A : List[Any]=0.02 ,__A : str=True ,__A : int=5_0256 ,__A : Union[str, Any]=5_0256 ,__A : int=False ,**__A : List[Any] ,) -> str:
_lowercase = vocab_size
_lowercase = n_positions
_lowercase = n_embd
_lowercase = n_layer
_lowercase = n_head
_lowercase = n_inner
_lowercase = rotary_dim
_lowercase = activation_function
_lowercase = resid_pdrop
_lowercase = embd_pdrop
_lowercase = attn_pdrop
_lowercase = layer_norm_epsilon
_lowercase = initializer_range
_lowercase = use_cache
_lowercase = bos_token_id
_lowercase = eos_token_id
super().__init__(
bos_token_id=__A ,eos_token_id=__A ,tie_word_embeddings=__A ,**__A )
class A_ ( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Tuple ,__A : PretrainedConfig ,__A : str = "default" ,__A : List[PatchingSpec] = None ,__A : bool = False ,) -> Any:
super().__init__(__A ,task=__A ,patching_specs=__A ,use_past=__A )
if not getattr(self._config ,'pad_token_id' ,__A ):
# TODO: how to do that better?
_lowercase = 0
@property
def __UpperCAmelCase ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
_lowercase = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(__A ,direction='inputs' )
_lowercase = {0: 'batch', 1: 'past_sequence + sequence'}
else:
_lowercase = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
return self._config.n_layer
@property
def __UpperCAmelCase ( self : Tuple ) -> int:
return self._config.n_head
def __UpperCAmelCase ( self : Any ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
_lowercase = super(__A ,self ).generate_dummy_inputs(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
# We need to order the input in the way they appears in the forward()
_lowercase = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowercase , _lowercase = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_lowercase = seqlen + 2
_lowercase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_lowercase = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers )
]
_lowercase = common_inputs['attention_mask']
if self.use_past:
_lowercase = ordered_inputs['attention_mask'].dtype
_lowercase = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(__A ,__A ,dtype=__A )] ,dim=1 )
return ordered_inputs
@property
def __UpperCAmelCase ( self : str ) -> int:
return 13
| 535
| 0
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__snake_case : Dict =logging.get_logger(__name__)
__snake_case : Optional[Any] ={
'Visual-Attention-Network/van-base': (
'https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'
),
}
class VanConfig(PretrainedConfig):
    """Configuration for a VAN (Visual Attention Network) model."""

    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
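# Illustrative usage sketch (not part of the original file): a minimal configuration
# override; the values below are assumptions chosen only to show which arguments
# control the four stages of the backbone.
def _example_van_config():
    return VanConfig(hidden_sizes=[32, 64, 160, 256], depths=[2, 2, 4, 2], mlp_ratios=[8, 8, 4, 4])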
| 647
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""],
"""tokenization_ctrl""": ["""CTRLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
"""CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CTRLForSequenceClassification""",
"""CTRLLMHeadModel""",
"""CTRLModel""",
"""CTRLPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
"""TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCTRLForSequenceClassification""",
"""TFCTRLLMHeadModel""",
"""TFCTRLModel""",
"""TFCTRLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 237
| 0
|
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
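# Illustrative usage sketch (not part of the original file): `fire` exposes
# save_len_file as a CLI; the tokenizer name and data directory are assumptions.
#   python save_len_file.py --tokenizer_name facebook/bart-base --data_dir path/to/data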
if __name__ == "__main__":
fire.Fire(save_len_file)
| 700
|
from ..utils import DummyObject, requires_backends
class _lowerCamelCase (metaclass=lowerCamelCase ):
lowercase__ = ["""flax"""]
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
class _lowerCamelCase (metaclass=lowerCamelCase ):
lowercase__ = ["""flax"""]
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
class _lowerCamelCase (metaclass=lowerCamelCase ):
lowercase__ = ["""flax"""]
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
class _lowerCamelCase (metaclass=lowerCamelCase ):
lowercase__ = ["""flax"""]
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
class _lowerCamelCase (metaclass=lowerCamelCase ):
lowercase__ = ["""flax"""]
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
class _lowerCamelCase (metaclass=lowerCamelCase ):
lowercase__ = ["""flax"""]
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
class _lowerCamelCase (metaclass=lowerCamelCase ):
lowercase__ = ["""flax"""]
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
class _lowerCamelCase (metaclass=lowerCamelCase ):
lowercase__ = ["""flax"""]
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
class _lowerCamelCase (metaclass=lowerCamelCase ):
lowercase__ = ["""flax"""]
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
class _lowerCamelCase (metaclass=lowerCamelCase ):
lowercase__ = ["""flax"""]
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
class _lowerCamelCase (metaclass=lowerCamelCase ):
lowercase__ = ["""flax"""]
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
class _lowerCamelCase (metaclass=lowerCamelCase ):
lowercase__ = ["""flax"""]
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
class _lowerCamelCase (metaclass=lowerCamelCase ):
lowercase__ = ["""flax"""]
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
| 345
| 0
|
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 243
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase =logging.get_logger(__name__)
lowerCamelCase ={
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class _lowerCamelCase ( UpperCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''gpt_neox_japanese'''
def __init__( self , __SCREAMING_SNAKE_CASE=3_2_0_0_0 , __SCREAMING_SNAKE_CASE=2_5_6_0 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=1.00 , __SCREAMING_SNAKE_CASE=1_0_0_0_0 , __SCREAMING_SNAKE_CASE=2_0_4_8 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-5 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=3_1_9_9_6 , __SCREAMING_SNAKE_CASE=3_1_9_9_9 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.0 , **__SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
super().__init__(bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = vocab_size
UpperCamelCase__ : int = max_position_embeddings
UpperCamelCase__ : Union[str, Any] = hidden_size
UpperCamelCase__ : Any = num_hidden_layers
UpperCamelCase__ : List[Any] = num_attention_heads
UpperCamelCase__ : int = intermediate_multiple_size
UpperCamelCase__ : List[Any] = hidden_act
UpperCamelCase__ : Tuple = rotary_pct
UpperCamelCase__ : Optional[int] = rotary_emb_base
UpperCamelCase__ : Optional[Any] = initializer_range
UpperCamelCase__ : Optional[int] = layer_norm_eps
UpperCamelCase__ : Optional[Any] = use_cache
UpperCamelCase__ : List[str] = attention_dropout
UpperCamelCase__ : Optional[int] = hidden_dropout
| 285
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class a ( _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, unittest.TestCase ):
"""simple docstring"""
A__ : Union[str, Any] = StableDiffusionPanoramaPipeline
A__ : Dict = TEXT_TO_IMAGE_PARAMS
A__ : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
A__ : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
A__ : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
def __A ( self ) -> Any:
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
_UpperCAmelCase = DDIMScheduler()
torch.manual_seed(0 )
_UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
_UpperCAmelCase = CLIPTextModel(snake_case_ )
_UpperCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCAmelCase = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __A ( self , snake_case_ , snake_case_=0 ) -> Union[str, Any]:
_UpperCAmelCase = torch.manual_seed(snake_case_ )
_UpperCAmelCase = {
"prompt": "a photo of the dolomites",
"generator": generator,
# Setting height and width to None to prevent OOMs on CPU.
"height": None,
"width": None,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __A ( self ) -> List[Any]:
_UpperCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = StableDiffusionPanoramaPipeline(**snake_case_ )
_UpperCAmelCase = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
_UpperCAmelCase = self.get_dummy_inputs(snake_case_ )
_UpperCAmelCase = sd_pipe(**snake_case_ ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array([0.61_86, 0.53_74, 0.49_15, 0.41_35, 0.41_14, 0.45_63, 0.51_28, 0.49_77, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ) -> int:
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def __A ( self ) -> Union[str, Any]:
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 )
def __A ( self ) -> str:
_UpperCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = StableDiffusionPanoramaPipeline(**snake_case_ )
_UpperCAmelCase = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
_UpperCAmelCase = self.get_dummy_inputs(snake_case_ )
_UpperCAmelCase = "french fries"
_UpperCAmelCase = sd_pipe(**snake_case_ , negative_prompt=snake_case_ )
_UpperCAmelCase = output.images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ) -> str:
_UpperCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = StableDiffusionPanoramaPipeline(**snake_case_ )
_UpperCAmelCase = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
_UpperCAmelCase = self.get_dummy_inputs(snake_case_ )
_UpperCAmelCase = sd_pipe(**snake_case_ , view_batch_size=2 )
_UpperCAmelCase = output.images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ) -> Dict:
_UpperCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" )
_UpperCAmelCase = StableDiffusionPanoramaPipeline(**snake_case_ )
_UpperCAmelCase = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
_UpperCAmelCase = self.get_dummy_inputs(snake_case_ )
_UpperCAmelCase = sd_pipe(**snake_case_ ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array([0.40_24, 0.65_10, 0.49_01, 0.53_78, 0.58_13, 0.56_22, 0.47_95, 0.44_67, 0.49_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ) -> Dict:
_UpperCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = PNDMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , skip_prk_steps=snake_case_ )
_UpperCAmelCase = StableDiffusionPanoramaPipeline(**snake_case_ )
_UpperCAmelCase = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
_UpperCAmelCase = self.get_dummy_inputs(snake_case_ )
_UpperCAmelCase = sd_pipe(**snake_case_ ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array([0.63_91, 0.62_91, 0.48_61, 0.51_34, 0.55_52, 0.45_78, 0.50_32, 0.50_23, 0.45_39] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self , snake_case_=0 ) -> Any:
_UpperCAmelCase = torch.manual_seed(snake_case_ )
_UpperCAmelCase = {
"prompt": "a photo of the dolomites",
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def __A ( self ) -> int:
_UpperCAmelCase = "stabilityai/stable-diffusion-2-base"
_UpperCAmelCase = DDIMScheduler.from_pretrained(snake_case_ , subfolder="scheduler" )
_UpperCAmelCase = StableDiffusionPanoramaPipeline.from_pretrained(snake_case_ , scheduler=snake_case_ , safety_checker=snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing()
_UpperCAmelCase = self.get_inputs()
_UpperCAmelCase = pipe(**snake_case_ ).images
_UpperCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
_UpperCAmelCase = np.array(
[
0.36_96_83_92,
0.27_02_53_72,
0.32_44_67_66,
0.28_37_93_87,
0.36_36_32_74,
0.30_73_33_47,
0.27_10_00_27,
0.27_05_41_25,
0.25_53_60_96,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def __A ( self ) -> str:
_UpperCAmelCase = StableDiffusionPanoramaPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-base" , safety_checker=snake_case_ )
_UpperCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing()
_UpperCAmelCase = self.get_inputs()
_UpperCAmelCase = pipe(**snake_case_ ).images
_UpperCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
_UpperCAmelCase = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def __A ( self ) -> Optional[int]:
_UpperCAmelCase = 0
def callback_fn(snake_case_ , snake_case_ , snake_case_ ) -> None:
_UpperCAmelCase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_UpperCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
_UpperCAmelCase = latents[0, -3:, -3:, -1]
_UpperCAmelCase = np.array(
[
0.18_68_18_69,
0.33_90_78_16,
0.5_36_12_76,
0.14_43_28_65,
-0.02_85_66_11,
-0.73_94_11_23,
0.23_39_79_87,
0.47_32_26_82,
-0.37_82_31_64,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
_UpperCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
_UpperCAmelCase = latents[0, -3:, -3:, -1]
_UpperCAmelCase = np.array(
[
0.18_53_96_45,
0.33_98_72_48,
0.5_37_85_59,
0.14_43_71_42,
-0.02_45_52_61,
-0.7_33_83_17,
0.23_99_07_55,
0.47_35_62_72,
-0.3_78_65_05,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
_UpperCAmelCase = False
_UpperCAmelCase = "stabilityai/stable-diffusion-2-base"
_UpperCAmelCase = DDIMScheduler.from_pretrained(snake_case_ , subfolder="scheduler" )
_UpperCAmelCase = StableDiffusionPanoramaPipeline.from_pretrained(snake_case_ , scheduler=snake_case_ , safety_checker=snake_case_ )
_UpperCAmelCase = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing()
_UpperCAmelCase = self.get_inputs()
pipe(**snake_case_ , callback=snake_case_ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __A ( self ) -> str:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCAmelCase = "stabilityai/stable-diffusion-2-base"
_UpperCAmelCase = DDIMScheduler.from_pretrained(snake_case_ , subfolder="scheduler" )
_UpperCAmelCase = StableDiffusionPanoramaPipeline.from_pretrained(snake_case_ , scheduler=snake_case_ , safety_checker=snake_case_ )
_UpperCAmelCase = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase = self.get_inputs()
_UpperCAmelCase = pipe(**snake_case_ )
_UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 579
|
"""simple docstring"""
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class a ( unittest.TestCase ):
"""simple docstring"""
A__ : Any = inspect.getfile(accelerate.test_utils )
A__ : List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_cli.py"] )
A__ : int = ["accelerate", "launch"]
A__ : Optional[Any] = Path.home() / ".cache/huggingface/accelerate"
A__ : Tuple = "default_config.yaml"
A__ : int = config_folder / config_file
A__ : str = config_folder / "_default_config.yaml"
A__ : Union[str, Any] = Path("tests/test_configs" )
@classmethod
def __A ( cls ) -> str:
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def __A ( cls ) -> Tuple:
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def __A ( self ) -> int:
_UpperCAmelCase = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def __A ( self ) -> List[str]:
for config in sorted(self.test_config_path.glob("**/*.yaml" ) ):
with self.subTest(config_file=snake_case_ ):
execute_subprocess_async(
self.base_cmd + ["--config_file", str(snake_case_ ), self.test_file_path] , env=os.environ.copy() )
def __A ( self ) -> Dict:
execute_subprocess_async(["accelerate", "test"] , env=os.environ.copy() )
class a ( unittest.TestCase ):
"""simple docstring"""
A__ : Any = "test-tpu"
A__ : Optional[Any] = "us-central1-a"
A__ : int = "ls"
A__ : Any = ["accelerate", "tpu-config"]
A__ : Union[str, Any] = "cd /usr/share"
A__ : Dict = "tests/test_samples/test_command_file.sh"
A__ : int = "Running gcloud compute tpus tpu-vm ssh"
def __A ( self ) -> List[Any]:
_UpperCAmelCase = run_command(
self.cmd
+ ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"] , return_stdout=snake_case_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , snake_case_ , )
def __A ( self ) -> List[str]:
_UpperCAmelCase = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command",
self.command,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
] , return_stdout=snake_case_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , snake_case_ , )
def __A ( self ) -> Union[str, Any]:
_UpperCAmelCase = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"] , return_stdout=snake_case_ )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , snake_case_ , )
def __A ( self ) -> List[Any]:
_UpperCAmelCase = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"] , return_stdout=snake_case_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , snake_case_ , )
def __A ( self ) -> Optional[Any]:
_UpperCAmelCase = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--command",
self.command,
"--command",
"echo \"Hello World\"",
"--debug",
] , return_stdout=snake_case_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , snake_case_ , )
def __A ( self ) -> List[str]:
_UpperCAmelCase = run_command(
self.cmd
+ ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"] , return_stdout=snake_case_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , snake_case_ , )
def __A ( self ) -> Optional[Any]:
_UpperCAmelCase = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command_file",
self.command_file,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
] , return_stdout=snake_case_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , snake_case_ , )
def __A ( self ) -> List[str]:
_UpperCAmelCase = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"] , return_stdout=snake_case_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , snake_case_ , )
def __A ( self ) -> Union[str, Any]:
_UpperCAmelCase = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--install_accelerate",
"--accelerate_version",
"12.0.0",
"--debug",
] , return_stdout=snake_case_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , snake_case_ , )
| 579
| 1
|
import random
def partition(a, left_index, right_index):
    """Partition a[left_index:right_index] around the pivot a[left_index]."""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    """Randomized quicksort over the half-open range [left, right)."""
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
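# Illustrative usage sketch (not part of the original file): quick_sort_random sorts
# in place over the half-open range [left, right); the sample data is arbitrary.
def _example_sort():
    data = [3, 1, 4, 1, 5, 9, 2, 6]
    quick_sort_random(data, 0, len(data))
    return data  # [1, 1, 2, 3, 4, 5, 6, 9]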
if __name__ == "__main__":
main()
| 61
|
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    """
    Find the text in `filename` between `start_prompt` and `end_prompt`, returning it
    together with the line indices and the full list of lines.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task(task_guide):
    """Return the list of models supporting a given task guide, formatted as doc links."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    """Check (and optionally fix) the auto-generated model list in a task guide."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 123
| 0
|
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
snake_case__ = get_logger(__name__)
class UpperCamelCase :
'''simple docstring'''
A_ = 'dummy_data'
A_ = 'datasets'
A_ = False
def __init__( self , A_ , A_ , A_ , A_ = None , A_ = False , A_ = True , A_ = None , ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = 0
_lowerCamelCase = dataset_name
_lowerCamelCase = cache_dir
_lowerCamelCase = use_local_dummy_data
_lowerCamelCase = config
# download_callbacks take a single url as input
_lowerCamelCase = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
_lowerCamelCase = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
_lowerCamelCase = str(A_ )
# to be downloaded
_lowerCamelCase = None
_lowerCamelCase = None
@property
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
if self._dummy_file is None:
_lowerCamelCase = self.download_dummy_data()
return self._dummy_file
@property
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('''dummy''' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('''dummy''' , self.version_name )
@property
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' )
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
_lowerCamelCase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
_lowerCamelCase = cached_path(
A_ , cache_dir=self.cache_dir , extract_compressed_file=A_ , force_extract=A_ )
return os.path.join(A_ , self.dummy_file_name )
@property
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
if self._bucket_url is None:
_lowerCamelCase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) )
return self._bucket_url
@property
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] )
def UpperCamelCase_ ( self , A_ , *A_ ) -> List[Any]:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
_lowerCamelCase = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
_lowerCamelCase = self.dummy_file_name
# special case when data_url is a dict
if isinstance(A_ , A_ ):
return self.create_dummy_data_dict(A_ , A_ )
elif isinstance(A_ , (list, tuple) ):
return self.create_dummy_data_list(A_ , A_ )
else:
return self.create_dummy_data_single(A_ , A_ )
def UpperCamelCase_ ( self , A_ , *A_ ) -> List[Any]:
"""simple docstring"""
return self.download_and_extract(A_ )
def UpperCamelCase_ ( self , A_ , A_ ) -> Union[str, Any]:
"""simple docstring"""
return self.download_and_extract(A_ )
def UpperCamelCase_ ( self , A_ , *A_ , **A_ ) -> Union[str, Any]:
"""simple docstring"""
return path
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
return {}
def UpperCamelCase_ ( self , A_ , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(A_ , A_ ):
for single_url in single_urls:
download_callback(A_ )
else:
_lowerCamelCase = single_urls
download_callback(A_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(A_ , A_ ):
_lowerCamelCase = [os.path.join(A_ , urllib.parse.quote_plus(Path(A_ ).name ) ) for x in single_urls]
else:
_lowerCamelCase = single_urls
_lowerCamelCase = os.path.join(A_ , urllib.parse.quote_plus(Path(A_ ).name ) )
_lowerCamelCase = value
# make sure that values are unique
if all(isinstance(A_ , A_ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
_lowerCamelCase = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def UpperCamelCase_ ( self , A_ , A_ ) -> Dict:
"""simple docstring"""
_lowerCamelCase = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
_lowerCamelCase = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , A_ ) ) for url in data_url )
_lowerCamelCase = all(
url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
_lowerCamelCase = [data_url[0]] * len(A_ )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(A_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_lowerCamelCase = os.path.join(A_ , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) )
dummy_data_list.append(A_ )
return dummy_data_list
def UpperCamelCase_ ( self , A_ , A_ ) -> Any:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(A_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_lowerCamelCase = os.path.join(A_ , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) )
if os.path.exists(A_ ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
pass
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
pass
def UpperCamelCase_ ( self , A_ ) -> List[Any]:
"""simple docstring"""
def _iter_archive_members(A_ ):
# this preserves the order of the members inside the ZIP archive
_lowerCamelCase = Path(self.dummy_file ).parent
_lowerCamelCase = path.relative_to(A_ )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
_lowerCamelCase = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(A_ )
_lowerCamelCase = Path(A_ )
_lowerCamelCase = _iter_archive_members(A_ ) if self.use_local_dummy_data else path.rglob('''*''' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ):
yield file_path.relative_to(A_ ).as_posix(), file_path.open('''rb''' )
def UpperCamelCase_ ( self , A_ ) -> Tuple:
"""simple docstring"""
if not isinstance(A_ , A_ ):
_lowerCamelCase = [paths]
for path in paths:
if os.path.isfile(A_ ):
if os.path.basename(A_ ).startswith(('''.''', '''__''') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(A_ ):
if os.path.basename(A_ ).startswith(('''.''', '''__''') ):
continue
dirnames.sort()
for filename in sorted(A_ ):
if filename.startswith(('''.''', '''__''') ):
continue
yield os.path.join(A_ , A_ )
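# --- Added illustration (hedged, not part of the original module): the central convention in
# the manager above is that each real download URL is mapped to a file inside the dummy-data
# folder named after the URL-quoted last path segment (see `create_dummy_data_dict` /
# `create_dummy_data_single`). The standalone helper below reproduces only that mapping.
def _dummy_path_for_url(dummy_root: str, url: str) -> str:
    return os.path.join(dummy_root, urllib.parse.quote_plus(Path(url).name))


if __name__ == "__main__":
    # e.g. 'dummy/1.0.0/dummy_data.zip/train.json%3Frev%3D2'
    print(_dummy_path_for_url("dummy/1.0.0/dummy_data.zip", "https://host/data/train.json?rev=2"))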
| 705
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    """Output of `UNet1DModel`: the (possibly denoised) sample of shape `(batch, channels, sample_size)`."""
    sample: torch.FloatTensor
class UNet1DModel(ModelMixin, ConfigMixin):
    """A 1D UNet that maps a noisy sample and a timestep to an output with the same sequence length."""
@register_to_config
def __init__( self , A_ = 6_55_36 , A_ = None , A_ = 2 , A_ = 2 , A_ = 0 , A_ = "fourier" , A_ = True , A_ = False , A_ = 0.0 , A_ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , A_ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , A_ = "UNetMidBlock1D" , A_ = None , A_ = (32, 32, 64) , A_ = None , A_ = 8 , A_ = 1 , A_ = False , ) -> Dict:
"""simple docstring"""
super().__init__()
_lowerCamelCase = sample_size
# time
if time_embedding_type == "fourier":
_lowerCamelCase = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=A_ , log=A_ , flip_sin_to_cos=A_ )
_lowerCamelCase = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
_lowerCamelCase = Timesteps(
block_out_channels[0] , flip_sin_to_cos=A_ , downscale_freq_shift=A_ )
_lowerCamelCase = block_out_channels[0]
if use_timestep_embedding:
_lowerCamelCase = block_out_channels[0] * 4
_lowerCamelCase = TimestepEmbedding(
in_channels=A_ , time_embed_dim=A_ , act_fn=A_ , out_dim=block_out_channels[0] , )
_lowerCamelCase = nn.ModuleList([] )
_lowerCamelCase = None
_lowerCamelCase = nn.ModuleList([] )
_lowerCamelCase = None
# down
_lowerCamelCase = in_channels
for i, down_block_type in enumerate(A_ ):
_lowerCamelCase = output_channel
_lowerCamelCase = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
_lowerCamelCase = i == len(A_ ) - 1
_lowerCamelCase = get_down_block(
A_ , num_layers=A_ , in_channels=A_ , out_channels=A_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(A_ )
# mid
_lowerCamelCase = get_mid_block(
A_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=A_ , add_downsample=A_ , )
# up
_lowerCamelCase = list(reversed(A_ ) )
_lowerCamelCase = reversed_block_out_channels[0]
if out_block_type is None:
_lowerCamelCase = out_channels
else:
_lowerCamelCase = block_out_channels[0]
for i, up_block_type in enumerate(A_ ):
_lowerCamelCase = output_channel
_lowerCamelCase = (
reversed_block_out_channels[i + 1] if i < len(A_ ) - 1 else final_upsample_channels
)
_lowerCamelCase = i == len(A_ ) - 1
_lowerCamelCase = get_up_block(
A_ , num_layers=A_ , in_channels=A_ , out_channels=A_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(A_ )
_lowerCamelCase = output_channel
# out
_lowerCamelCase = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
_lowerCamelCase = get_out_block(
out_block_type=A_ , num_groups_out=A_ , embed_dim=block_out_channels[0] , out_channels=A_ , act_fn=A_ , fc_dim=block_out_channels[-1] // 4 , )
    def forward(self, sample, timestep, return_dict: bool = True) -> Union[UNet1DOutput, Tuple]:
        """Run the UNet on `sample` conditioned on `timestep`; returns a `UNet1DOutput` unless `return_dict=False`."""
        # 1. time: make sure the timestep is a 1-D tensor on the right device, then embed it
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
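# --- Added usage sketch (hedged): the class above is a 1D UNet for sequence-shaped data.
# The config values below are illustrative only and follow the public diffusers-style API.
#
#   import torch
#
#   unet = UNet1DModel(sample_size=65536, in_channels=2, out_channels=2)
#   sample = torch.randn(1, 2, 65536)                       # (batch, channels, length)
#   out = unet(sample, timestep=torch.tensor([10])).sample  # same (batch, channels, length) layout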
| 638
| 0
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
a_ = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(_UpperCamelCase )} , )
a_ = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
a_ = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
a_ = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def lowercase ( self : List[Any] ) -> List[Any]:
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'--config_overrides can\'t be used in combination with --config_name or --model_name_or_path' )
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
a_ = field(default=_UpperCamelCase , metadata={"""help""": """The input training data file (a text file)."""} )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
a_ = field(
default=5 , metadata={
"""help""": """The percentage of the train set used as validation set in case there's no validation split"""
} , )
a_ = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated. Default to the max input length of the model."""
)
} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
a_ = field(
default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
a_ = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
def lowercase ( self : int ) -> int:
if self.train_file is not None:
__lowerCAmelCase = self.train_file.split('.' )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
__lowerCAmelCase = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
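# Added note (hedged): each line of the ref file is expected to hold one JSON value, typically
# a list of character positions marking where Chinese whole words start; the
# DataCollatorForWholeWordMask reads these through the `chinese_ref` column added above. The
# exact semantics depend on how the ref files were produced.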
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__lowerCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowerCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout )], )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s', lowerCAmelCase_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__lowerCAmelCase = load_dataset(data_args.dataset_name, data_args.dataset_config_name )
if "validation" not in datasets.keys():
__lowerCAmelCase = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, split=F"""train[:{data_args.validation_split_percentage}%]""", )
__lowerCAmelCase = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, split=F"""train[{data_args.validation_split_percentage}%:]""", )
else:
__lowerCAmelCase = {}
if data_args.train_file is not None:
__lowerCAmelCase = data_args.train_file
if data_args.validation_file is not None:
__lowerCAmelCase = data_args.validation_file
__lowerCAmelCase = data_args.train_file.split('.' )[-1]
if extension == "txt":
__lowerCAmelCase = 'text'
__lowerCAmelCase = load_dataset(lowerCAmelCase_, data_files=lowerCAmelCase_ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowerCAmelCase = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
__lowerCAmelCase = AutoConfig.from_pretrained(model_args.config_name, **lowerCAmelCase_ )
elif model_args.model_name_or_path:
__lowerCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path, **lowerCAmelCase_ )
else:
__lowerCAmelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
__lowerCAmelCase = {
'cache_dir': model_args.cache_dir,
'use_fast': model_args.use_fast_tokenizer,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
__lowerCAmelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **lowerCAmelCase_ )
elif model_args.model_name_or_path:
__lowerCAmelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **lowerCAmelCase_ )
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
if model_args.model_name_or_path:
__lowerCAmelCase = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path ), config=lowerCAmelCase_, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
else:
logger.info('Training new model from scratch' )
__lowerCAmelCase = AutoModelForMaskedLM.from_config(lowerCAmelCase_ )
model.resize_token_embeddings(len(lowerCAmelCase_ ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
__lowerCAmelCase = datasets['train'].column_names
else:
__lowerCAmelCase = datasets['validation'].column_names
__lowerCAmelCase = 'text' if 'text' in column_names else column_names[0]
__lowerCAmelCase = 'max_length' if data_args.pad_to_max_length else False
def tokenize_function(lowerCAmelCase_ : str ):
# Remove empty lines
__lowerCAmelCase = [line for line in examples['text'] if len(lowerCAmelCase_ ) > 0 and not line.isspace()]
return tokenizer(examples['text'], padding=lowerCAmelCase_, truncation=lowerCAmelCase_, max_length=data_args.max_seq_length )
__lowerCAmelCase = datasets.map(
lowerCAmelCase_, batched=lowerCAmelCase_, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=not data_args.overwrite_cache, )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
__lowerCAmelCase = add_chinese_references(tokenized_datasets['train'], data_args.train_ref_file )
if data_args.validation_ref_file is not None:
__lowerCAmelCase = add_chinese_references(
tokenized_datasets['validation'], data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
__lowerCAmelCase = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
__lowerCAmelCase = False
# Data collator
# This one will take care of randomly masking the tokens.
__lowerCAmelCase = DataCollatorForWholeWordMask(tokenizer=lowerCAmelCase_, mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowerCAmelCase = Trainer(
model=lowerCAmelCase_, args=lowerCAmelCase_, train_dataset=tokenized_datasets['train'] if training_args.do_train else None, eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None, tokenizer=lowerCAmelCase_, data_collator=lowerCAmelCase_, )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__lowerCAmelCase = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
__lowerCAmelCase = model_args.model_name_or_path
else:
__lowerCAmelCase = None
__lowerCAmelCase = trainer.train(resume_from_checkpoint=lowerCAmelCase_ )
trainer.save_model() # Saves the tokenizer too for easy upload
__lowerCAmelCase = os.path.join(training_args.output_dir, 'train_results.txt' )
if trainer.is_world_process_zero():
with open(lowerCAmelCase_, 'w' ) as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir, 'trainer_state.json' ) )
# Evaluation
__lowerCAmelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__lowerCAmelCase = trainer.evaluate()
__lowerCAmelCase = math.exp(eval_output['eval_loss'] )
__lowerCAmelCase = perplexity
__lowerCAmelCase = os.path.join(training_args.output_dir, 'eval_results_mlm_wwm.txt' )
if trainer.is_world_process_zero():
with open(lowerCAmelCase_, 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in sorted(results.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
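# --- Added usage sketch (hedged): the script above is launched from the command line; the
# flags map onto the dataclass fields defined earlier (ModelArguments, DataTrainingArguments,
# TrainingArguments). The script name, file names and model identifier below are placeholders.
#
#   python run_mlm_wwm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_file train.txt \
#       --train_ref_file train_ref.txt \
#       --do_train \
#       --output_dir ./output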
| 53
|
def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below `n` (Project Euler problem 10).

    Uses a sieve of Eratosthenes; e.g. solution(10) == 2 + 3 + 5 + 7 == 17.
    """
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f"{solution() = }")
| 53
| 1
|
def solution(limit: int = 28123) -> int:
    """Return the sum of all positive integers that cannot be written as the sum of two
    abundant numbers (Project Euler problem 23); 28123 is a known upper bound for such numbers."""
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        if not any((n - a) in abundants for a in abundants):
            res += n

    return res


if __name__ == "__main__":
    print(solution())
| 260
|
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
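# --- Added note (hedged): with the `_LazyModule` pattern above, importing the package is cheap
# because the heavy `modeling_focalnet` module is only imported when one of its attributes is
# first accessed, while the TYPE_CHECKING branch keeps static type checkers aware of the real
# symbols. A generic, simplified sketch of the idea (not the actual transformers implementation):
#
#   import importlib, types
#
#   class LazyModuleSketch(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._class_to_module = {
#               cls: mod for mod, classes in import_structure.items() for cls in classes
#           }
#
#       def __getattr__(self, item):
#           submodule = importlib.import_module("." + self._class_to_module[item], self.__name__)
#           return getattr(submodule, item)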
| 260
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class UpperCamelCase_ ( unittest.TestCase ):
def __init__( self :str , __A :List[Any] , __A :Optional[Any]=7 , __A :int=3 , __A :int=10 , __A :Dict=18 , __A :str=30 , __A :Any=400 , __A :Dict=True , __A :Any=None , __A :List[str]=True , __A :Dict=[0.5, 0.5, 0.5] , __A :List[str]=[0.5, 0.5, 0.5] , __A :List[str]=None , ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = size if size is not None else {"""shortest_edge""": 18}
SCREAMING_SNAKE_CASE__ = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = num_frames
SCREAMING_SNAKE_CASE__ = image_size
SCREAMING_SNAKE_CASE__ = min_resolution
SCREAMING_SNAKE_CASE__ = max_resolution
SCREAMING_SNAKE_CASE__ = do_resize
SCREAMING_SNAKE_CASE__ = size
SCREAMING_SNAKE_CASE__ = do_normalize
SCREAMING_SNAKE_CASE__ = image_mean
SCREAMING_SNAKE_CASE__ = image_std
SCREAMING_SNAKE_CASE__ = crop_size
def _snake_case ( self :Tuple ) -> str:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class UpperCamelCase_ ( UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase_ = VivitImageProcessor if is_vision_available() else None
def _snake_case ( self :int ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = VivitImageProcessingTester(self )
@property
def _snake_case ( self :Optional[Any] ) -> List[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self :int ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , """image_mean""" ) )
self.assertTrue(hasattr(__A , """image_std""" ) )
self.assertTrue(hasattr(__A , """do_normalize""" ) )
self.assertTrue(hasattr(__A , """do_resize""" ) )
self.assertTrue(hasattr(__A , """do_center_crop""" ) )
self.assertTrue(hasattr(__A , """size""" ) )
def _snake_case ( self :Optional[Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def _snake_case ( self :List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
SCREAMING_SNAKE_CASE__ = prepare_video_inputs(self.image_processor_tester , equal_resolution=__A )
for video in video_inputs:
self.assertIsInstance(__A , __A )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _snake_case ( self :Union[str, Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ = prepare_video_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for video in video_inputs:
self.assertIsInstance(__A , __A )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _snake_case ( self :int ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ = prepare_video_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for video in video_inputs:
self.assertIsInstance(__A , __A )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
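# --- Added note (hedged): in all three cases above (PIL, numpy and torch inputs) the processor
# is expected to return `pixel_values` shaped (batch, num_frames, num_channels, crop_height,
# crop_width), which is exactly the layout the shape assertions check against.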
| 6
|
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : str , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int]=13 , lowerCAmelCase : Optional[int]=64 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : int=3 , lowerCAmelCase : List[str]=True , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : Tuple=32 , lowerCAmelCase : Union[str, Any]=5 , lowerCAmelCase : Dict=4 , lowerCAmelCase : Any=37 , lowerCAmelCase : Tuple="gelu" , lowerCAmelCase : int=0.1 , lowerCAmelCase : Optional[int]=0.1 , lowerCAmelCase : Optional[Any]=10 , lowerCAmelCase : List[str]=0.02 , lowerCAmelCase : Optional[int]=[1, 16, 4, 4] , lowerCAmelCase : Optional[int]=None , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = scope
lowercase__ = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
lowercase__ = (self.image_size // 32) ** 2
lowercase__ = num_patches + 1
def UpperCAmelCase ( self : Dict) -> Any:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase__ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=lowerCAmelCase , )
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any]) -> int:
"""simple docstring"""
lowercase__ = ViTHybridModel(config=lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase ( self : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : List[str]) -> Tuple:
"""simple docstring"""
lowercase__ = self.type_sequence_label_size
lowercase__ = ViTHybridForImageClassification(lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
lowercase__ = model(lowerCAmelCase , labels=lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self) -> Optional[Any]:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : Any = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
A : int = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
A : Tuple = False
A : Tuple = False
A : Union[str, Any] = False
def UpperCAmelCase ( self : Optional[int]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = ViTHybridModelTester(self)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37)
def UpperCAmelCase ( self : Dict) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds')
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
pass
def UpperCAmelCase ( self : str) -> Union[str, Any]:
"""simple docstring"""
lowercase__, lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCAmelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear))
def UpperCAmelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
lowercase__, lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCAmelCase)
lowercase__ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> List[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase)
def UpperCAmelCase ( self : int) -> List[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase)
def UpperCAmelCase ( self : str) -> Tuple:
"""simple docstring"""
lowercase__, lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = _config_zero_init(lowerCAmelCase)
for model_class in self.all_model_classes:
lowercase__ = model_class(config=lowerCAmelCase)
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
lowercase__ = [f'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def UpperCAmelCase ( self : Any) -> Optional[int]:
"""simple docstring"""
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = ViTHybridModel.from_pretrained(lowerCAmelCase)
self.assertIsNotNone(lowerCAmelCase)
def _lowerCAmelCase ( ):
lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
lowercase__ = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
lowerCAmelCase)
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=lowerCAmelCase , return_tensors='pt').to(lowerCAmelCase)
# forward pass
with torch.no_grad():
lowercase__ = model(**lowerCAmelCase)
# verify the logits
lowercase__ = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , lowerCAmelCase)
lowercase__ = torch.tensor([-1.90_90, -0.49_93, -0.23_89]).to(lowerCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4))
@slow
@require_accelerate
def UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
lowercase__ = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384')
lowercase__ = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto')
lowercase__ = prepare_img()
lowercase__ = image_processor(images=lowerCAmelCase , return_tensors='pt')
lowercase__ = model(**lowerCAmelCase)
lowercase__ = outputs.logits
# model predicts one of the 1000 ImageNet classes
lowercase__ = logits.argmax(-1).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , 'tabby, tabby cat')
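# --- Added note (hedged): `device_map="auto"` in the test above relies on `accelerate` to place
# the model weights across the available devices, which is why the test is decorated with
# @require_accelerate in addition to @slow.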
| 622
| 0
|
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Union[str, Any]=13 , lowerCamelCase__ :int=30 , lowerCamelCase__ :Optional[int]=2 , lowerCamelCase__ :Optional[int]=3 , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :Any=True , lowerCamelCase__ :Optional[int]=32 , lowerCamelCase__ :List[str]=2 , lowerCamelCase__ :Tuple=4 , lowerCamelCase__ :List[Any]=37 , lowerCamelCase__ :Optional[Any]="gelu" , lowerCamelCase__ :str=0.1 , lowerCamelCase__ :str=0.1 , lowerCamelCase__ :List[str]=10 , lowerCamelCase__ :Dict=0.02 , lowerCamelCase__ :int=3 , lowerCamelCase__ :List[Any]=0.6 , lowerCamelCase__ :str=None , ):
UpperCamelCase__ :str = parent
UpperCamelCase__ :Optional[Any] = batch_size
UpperCamelCase__ :List[Any] = image_size
UpperCamelCase__ :Tuple = patch_size
UpperCamelCase__ :Union[str, Any] = num_channels
UpperCamelCase__ :Optional[int] = is_training
UpperCamelCase__ :str = use_labels
UpperCamelCase__ :Union[str, Any] = hidden_size
UpperCamelCase__ :Any = num_hidden_layers
UpperCamelCase__ :Dict = num_attention_heads
UpperCamelCase__ :Dict = intermediate_size
UpperCamelCase__ :Dict = hidden_act
UpperCamelCase__ :Union[str, Any] = hidden_dropout_prob
UpperCamelCase__ :int = attention_probs_dropout_prob
UpperCamelCase__ :List[str] = type_sequence_label_size
UpperCamelCase__ :str = initializer_range
UpperCamelCase__ :Any = mask_ratio
UpperCamelCase__ :Any = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCamelCase__ :List[str] = (image_size // patch_size) ** 2
UpperCamelCase__ :int = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def __a ( self :str ):
UpperCamelCase__ :Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ :str = None
if self.use_labels:
UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ :List[Any] = self.get_config()
return config, pixel_values, labels
def __a ( self :int ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __a ( self :Any , lowerCamelCase__ :List[str] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[Any] ):
UpperCamelCase__ :str = TFViTMAEModel(config=lowerCamelCase__ )
UpperCamelCase__ :List[str] = model(lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Union[str, Any] ):
UpperCamelCase__ :Optional[int] = TFViTMAEForPreTraining(lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ , training=lowerCamelCase__ )
# expected sequence length = num_patches
UpperCamelCase__ :Tuple = (self.image_size // self.patch_size) ** 2
UpperCamelCase__ :int = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCamelCase__ :int = 1
UpperCamelCase__ :Any = TFViTMAEForPreTraining(lowerCamelCase__ )
UpperCamelCase__ :str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ :int = model(lowerCamelCase__ , training=lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : str = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
_snake_case : int = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
_snake_case : Optional[int] = False
_snake_case : Union[str, Any] = False
_snake_case : Dict = False
_snake_case : List[Any] = False
def __a ( self :int ):
UpperCamelCase__ :Optional[int] = TFViTMAEModelTester(self )
UpperCamelCase__ :Dict = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )
def __a ( self :List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def __a ( self :int ):
pass
def __a ( self :List[Any] ):
UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ :Any = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCamelCase__ :str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , tf.keras.layers.Layer ) )
def __a ( self :Optional[Any] ):
UpperCamelCase__ :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ :List[Any] = model_class(lowerCamelCase__ )
UpperCamelCase__ :List[str] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ :Optional[Any] = [*signature.parameters.keys()]
UpperCamelCase__ :Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def __a ( self :Tuple ):
UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def __a ( self :Optional[Any] ):
UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase__ )
def __a ( self :Dict ):
# make the mask reproducible
np.random.seed(2 )
UpperCamelCase__ :str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ :List[Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase__ :Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCamelCase__ :Optional[int] = model_class(lowerCamelCase__ )
UpperCamelCase__ :Tuple = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Any = model(lowerCamelCase__ , noise=lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = copy.deepcopy(self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
UpperCamelCase__ :Tuple = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
UpperCamelCase__ :List[str] = outputs_dict[0].numpy()
UpperCamelCase__ :Optional[int] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def __a ( self :Optional[int] ):
# make the mask reproducible
np.random.seed(2 )
UpperCamelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ :Tuple = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase__ :Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(lowerCamelCase__ :Dict ):
UpperCamelCase__ :Optional[int] = {}
for k, v in inputs_dict.items():
if tf.is_tensor(lowerCamelCase__ ):
UpperCamelCase__ :List[Any] = v.numpy()
else:
UpperCamelCase__ :List[Any] = np.array(lowerCamelCase__ )
return inputs_np_dict
for model_class in self.all_model_classes:
UpperCamelCase__ :List[str] = model_class(lowerCamelCase__ )
UpperCamelCase__ :List[Any] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :str = prepare_numpy_arrays(lowerCamelCase__ )
UpperCamelCase__ :List[str] = model(lowerCamelCase__ , noise=lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :List[str] , lowerCamelCase__ :List[str] , lowerCamelCase__ :Any , lowerCamelCase__ :List[str] ):
# make masks reproducible
np.random.seed(2 )
UpperCamelCase__ :Union[str, Any] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
UpperCamelCase__ :int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCamelCase__ :Tuple = tf.constant(lowerCamelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase__ :Optional[int] = tf_noise
super().check_pt_tf_models(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Dict ):
# make mask reproducible
np.random.seed(2 )
UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ :Union[str, Any] = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(lowerCamelCase__ )
if module_member_name.endswith("""MainLayer""" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
for module_member in (getattr(lowerCamelCase__ , lowerCamelCase__ ),)
if isinstance(lowerCamelCase__ , lowerCamelCase__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(lowerCamelCase__ , """_keras_serializable""" , lowerCamelCase__ )
}
UpperCamelCase__ :Dict = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase__ :List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCamelCase__ :Dict = tf.convert_to_tensor(lowerCamelCase__ )
inputs_dict.update({"""noise""": noise} )
for main_layer_class in tf_main_layer_classes:
UpperCamelCase__ :Optional[Any] = main_layer_class(lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
UpperCamelCase__ :Optional[Any] = tf.keras.Model(lowerCamelCase__ , outputs=main_layer(lowerCamelCase__ ) )
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__ :Union[str, Any] = os.path.join(lowerCamelCase__ , """keras_model.h5""" )
model.save(lowerCamelCase__ )
UpperCamelCase__ :Tuple = tf.keras.models.load_model(
lowerCamelCase__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(lowerCamelCase__ , tf.keras.Model )
UpperCamelCase__ :int = model(lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __a ( self :str ):
# make mask reproducible
np.random.seed(2 )
UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ :Optional[Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase__ :Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCamelCase__ :Dict = model_class(lowerCamelCase__ )
UpperCamelCase__ :List[Any] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ , noise=lowerCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
UpperCamelCase__ :List[str] = outputs.last_hidden_state.numpy()
UpperCamelCase__ :Dict = 0
else:
UpperCamelCase__ :Any = outputs.logits.numpy()
UpperCamelCase__ :List[Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase__ , saved_model=lowerCamelCase__ )
UpperCamelCase__ :Any = model_class.from_pretrained(lowerCamelCase__ )
UpperCamelCase__ :Dict = model(lowerCamelCase__ , noise=lowerCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
UpperCamelCase__ :int = after_outputs["""last_hidden_state"""].numpy()
UpperCamelCase__ :Dict = 0
else:
UpperCamelCase__ :Dict = after_outputs["""logits"""].numpy()
UpperCamelCase__ :List[Any] = 0
UpperCamelCase__ :Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase__ , 1e-5 )
def __a ( self :Optional[int] ):
# make mask reproducible
np.random.seed(2 )
UpperCamelCase__ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ :Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase__ :Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCamelCase__ :Optional[Any] = model_class(lowerCamelCase__ )
UpperCamelCase__ :Tuple = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ , noise=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
UpperCamelCase__ :List[Any] = model_class.from_config(model.config )
UpperCamelCase__ :str = new_model(lowerCamelCase__ ) # Build model
new_model.set_weights(model.get_weights() )
UpperCamelCase__ :Any = new_model(lowerCamelCase__ , noise=lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def __a ( self :Tuple ):
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def __a ( self :Union[str, Any] ):
pass
@slow
def __a ( self :str ):
UpperCamelCase__ :Tuple = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(lowerCamelCase__ )
def A ( ) -> Optional[int]:
UpperCamelCase__ :str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __a ( self :Optional[Any] ):
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def __a ( self :int ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
UpperCamelCase__ :List[Any] = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
UpperCamelCase__ :Union[str, Any] = self.default_image_processor
UpperCamelCase__ :Union[str, Any] = prepare_img()
UpperCamelCase__ :List[str] = image_processor(images=lowerCamelCase__ , return_tensors="""tf""" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase__ :Tuple = ViTMAEConfig()
UpperCamelCase__ :List[str] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCamelCase__ :Optional[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
UpperCamelCase__ :Union[str, Any] = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
# verify the logits
UpperCamelCase__ :Dict = tf.convert_to_tensor([1, 1_96, 7_68] )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
UpperCamelCase__ :str = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
| 715
|
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    """Yield the Fibonacci numbers 1, 2, 3, 5, 8, ... indefinitely."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term that contains ``n`` digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
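# Quick sanity check (Project Euler 25): the first Fibonacci term with 3 digits is F(12) = 144,
# so solution(3) == 12; with the default n = 1000 the function evaluates to 4782.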
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 383
| 0
|
from __future__ import annotations


def check_polygon(nums: list[float]) -> bool:
    """Return True iff the longest side is strictly shorter than the sum of the remaining sides."""
    if len(nums) < 2:
        raise ValueError('Monogons and Digons are not polygons in the Euclidean space')
    if any(i <= 0 for i in nums):
        raise ValueError('All values must be greater than 0')
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
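# Illustrative results:
#   check_polygon([6, 10, 5])    -> True   (10 < 6 + 5)
#   check_polygon([3, 7, 13, 2]) -> False  (13 >= 3 + 7 + 2)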
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85
|
import copy
import random

from transformers import CLIPTokenizer


class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """CLIP tokenizer that can map one placeholder token to several learned embedding tokens."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
| 114
| 0
|
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(F'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''')
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(F'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''')
            self.first_batch = False
        return x * self.a + self.b
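# Note (descriptive, class names restored above are best-effort): the dataset draws x ~ N(0, 1) and
# y = a * x + b plus N(0, 0.1) noise, so a fitted linear model should recover parameters close to (a, b).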
def get_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''')
    data_files = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
    datasets = load_dataset('''csv''', data_files=data_files)
    label_list = datasets['''train'''].unique('''label''')
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None, padding='''max_length''')
        if "label" in examples:
            outputs['''labels'''] = [label_to_id[l] for l in examples['''label''']]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['''sentence1''', '''sentence2''', '''label'''], )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='''max_length''', max_length=128, return_tensors='''pt''')
        return tokenizer.pad(examples, padding='''longest''', return_tensors='''pt''')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
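# Illustrative usage (assumes an `accelerate.Accelerator` instance and that the MRPC csv paths above exist):
#   accelerator = Accelerator()
#   train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size=16)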
| 105
|
from math import factorial


def solution(n: int = 20) -> int:
    """Count the lattice paths through an n x n grid (Project Euler 15), i.e. the central binomial coefficient C(2n, n)."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
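# Worked example: a 2x2 grid has C(4, 2) = 6 routes, so solution(2) == 6; the default solution(20) is C(40, 20).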
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number.""")
| 105
| 1
|
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, albert_config_file: str, pytorch_dump_path: str):
    '''simple docstring'''
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
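# Example invocation (script name and paths below are illustrative placeholders; see the argparse flags defined next):
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base/pytorch_model.bin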
if __name__ == "__main__":
a__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
a__ : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 165
|
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
lowerCamelCase__ = """laion/clap-htsat-unfused"""
lowerCamelCase__ = tempfile.mkdtemp()
def _UpperCamelCase ( self : Dict , **a_ : Optional[int] ):
"""simple docstring"""
return RobertaTokenizer.from_pretrained(self.checkpoint , **a_ )
def _UpperCamelCase ( self : Optional[Any] , **a_ : str ):
"""simple docstring"""
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **a_ )
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = self.get_feature_extractor()
lowerCamelCase__ = ClapProcessor(tokenizer=a_ , feature_extractor=a_ )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , a_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , a_ )
def _UpperCamelCase ( self : Any ):
"""simple docstring"""
lowerCamelCase__ = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCamelCase__ = self.get_feature_extractor(do_normalize=a_ , padding_value=1.0 )
lowerCamelCase__ = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=a_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , a_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , a_ )
def _UpperCamelCase ( self : Any ):
"""simple docstring"""
lowerCamelCase__ = self.get_feature_extractor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = ClapProcessor(tokenizer=a_ , feature_extractor=a_ )
lowerCamelCase__ = floats_list((3, 10_00) )
lowerCamelCase__ = feature_extractor(a_ , return_tensors="""np""" )
lowerCamelCase__ = processor(audios=a_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _UpperCamelCase ( self : Any ):
"""simple docstring"""
lowerCamelCase__ = self.get_feature_extractor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = ClapProcessor(tokenizer=a_ , feature_extractor=a_ )
lowerCamelCase__ = """This is a test string"""
lowerCamelCase__ = processor(text=a_ )
lowerCamelCase__ = tokenizer(a_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ = self.get_feature_extractor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = ClapProcessor(tokenizer=a_ , feature_extractor=a_ )
lowerCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase__ = processor.batch_decode(a_ )
lowerCamelCase__ = tokenizer.batch_decode(a_ )
self.assertListEqual(a_ , a_ )
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
lowerCamelCase__ = self.get_feature_extractor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = ClapProcessor(tokenizer=a_ , feature_extractor=a_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 165
| 1
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s: str, old: str, new: str, occurrence: int) -> str:
    """Replace the last ``occurrence`` occurrences of ``old`` in ``s`` with ``new``."""
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict) -> int:
    # encoder.embeddings are double copied in the original checkpoint, so skip them in the sum
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    """Rename DALL-E codebook keys into the FLAVA image codebook layout."""
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f'''{group_key}.''', f'''{group_key}.group.''')

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade
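# Illustrative renaming performed by upgrade_state_dict (the key is made up for the example):
#   "blocks.group_1.conv.w"  ->  "blocks.group_1.group.conv.weight"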
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    """Load a DALL-E codebook checkpoint and copy its weights into a FlavaImageCodebook model."""
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1E-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
_lowerCamelCase = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 708
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : Tuple ="roberta-prelayernorm"
def __init__( self ,_snake_case=5_02_65 ,_snake_case=7_68 ,_snake_case=12 ,_snake_case=12 ,_snake_case=30_72 ,_snake_case="gelu" ,_snake_case=0.1 ,_snake_case=0.1 ,_snake_case=5_12 ,_snake_case=2 ,_snake_case=0.02 ,_snake_case=1E-12 ,_snake_case=1 ,_snake_case=0 ,_snake_case=2 ,_snake_case="absolute" ,_snake_case=True ,_snake_case=None ,**_snake_case ,):
super().__init__(pad_token_id=_snake_case ,bos_token_id=_snake_case ,eos_token_id=_snake_case ,**_snake_case )
UpperCAmelCase_ : Optional[int] = vocab_size
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : int = num_hidden_layers
UpperCAmelCase_ : Optional[Any] = num_attention_heads
UpperCAmelCase_ : Optional[Any] = hidden_act
UpperCAmelCase_ : str = intermediate_size
UpperCAmelCase_ : Optional[Any] = hidden_dropout_prob
UpperCAmelCase_ : str = attention_probs_dropout_prob
UpperCAmelCase_ : Dict = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = type_vocab_size
UpperCAmelCase_ : Optional[int] = initializer_range
UpperCAmelCase_ : str = layer_norm_eps
UpperCAmelCase_ : Optional[int] = position_embedding_type
UpperCAmelCase_ : Any = use_cache
UpperCAmelCase_ : List[Any] = classifier_dropout
class _snake_case (__SCREAMING_SNAKE_CASE):
@property
def UpperCamelCase__ ( self ):
if self.task == "multiple-choice":
UpperCAmelCase_ : Any = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ : List[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 323
| 0
|
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_lowerCAmelCase : str =logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCAmelCase : Any ="""
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
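# Example of the latent sizing above (movq scale factor 8): a 768x768 request maps to a 96x96 latent grid,
# i.e. downscale_height_and_width(768, 768, scale_factor=8) == (96, 96); sizes that are not multiples of 64
# are first rounded up to the next multiple of 64 before being divided by the scale factor.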
class __UpperCamelCase ( _a ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
super().__init__()
self.register_modules(
unet=lowerCamelCase__ , scheduler=lowerCamelCase__ , movq=lowerCamelCase__ , )
UpperCAmelCase__: Optional[int] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if latents is None:
UpperCAmelCase__: Any = randn_tensor(lowerCamelCase__ , generator=lowerCamelCase__ , device=lowerCamelCase__ , dtype=lowerCamelCase__ )
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
UpperCAmelCase__: Tuple = latents.to(lowerCamelCase__ )
UpperCAmelCase__: Union[str, Any] = latents * scheduler.init_noise_sigma
return latents
def _UpperCAmelCase ( self , lowerCamelCase__=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
UpperCAmelCase__: Any = torch.device(F"cuda:{gpu_id}" )
UpperCAmelCase__: int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCAmelCase ( self , lowerCamelCase__=0 ):
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
UpperCAmelCase__: Union[str, Any] = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=lowerCamelCase__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase__: int = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase__ , UpperCAmelCase__: Tuple = cpu_offload_with_hook(lowerCamelCase__ , lowerCamelCase__ , prev_module_hook=lowerCamelCase__ )
# We'll offload the last model manually.
UpperCAmelCase__: List[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _UpperCAmelCase ( self ):
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCamelCase__ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowerCamelCase__ )
def __call__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 5_1_2 , lowerCamelCase__ = 5_1_2 , lowerCamelCase__ = 1_0_0 , lowerCamelCase__ = 4.0 , lowerCamelCase__ = 1 , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = "pil" , lowerCamelCase__ = True , ):
UpperCAmelCase__: str = self._execution_device
UpperCAmelCase__: List[Any] = guidance_scale > 1.0
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
UpperCAmelCase__: List[Any] = torch.cat(lowerCamelCase__ , dim=0 )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
UpperCAmelCase__: List[Any] = torch.cat(lowerCamelCase__ , dim=0 )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
UpperCAmelCase__: Any = torch.cat(lowerCamelCase__ , dim=0 )
UpperCAmelCase__: Dict = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
UpperCAmelCase__: Tuple = image_embeds.repeat_interleave(lowerCamelCase__ , dim=0 )
UpperCAmelCase__: Union[str, Any] = negative_image_embeds.repeat_interleave(lowerCamelCase__ , dim=0 )
UpperCAmelCase__: Any = hint.repeat_interleave(lowerCamelCase__ , dim=0 )
UpperCAmelCase__: Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCamelCase__ )
UpperCAmelCase__: Dict = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCamelCase__ )
self.scheduler.set_timesteps(lowerCamelCase__ , device=lowerCamelCase__ )
UpperCAmelCase__: Optional[Any] = self.scheduler.timesteps
UpperCAmelCase__: Optional[int] = self.movq.config.latent_channels
UpperCAmelCase__ , UpperCAmelCase__: Tuple = downscale_height_and_width(lowerCamelCase__ , lowerCamelCase__ , self.movq_scale_factor )
# create initial latent
UpperCAmelCase__: Any = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowerCamelCase__ ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase__: str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase__: List[str] = {"image_embeds": image_embeds, "hint": hint}
UpperCAmelCase__: Any = self.unet(
sample=lowerCamelCase__ , timestep=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , added_cond_kwargs=lowerCamelCase__ , return_dict=lowerCamelCase__ , )[0]
if do_classifier_free_guidance:
UpperCAmelCase__ , UpperCAmelCase__: str = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase__ , UpperCAmelCase__: Any = noise_pred.chunk(2 )
UpperCAmelCase__ , UpperCAmelCase__: Any = variance_pred.chunk(2 )
UpperCAmelCase__: List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase__: Dict = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase__ , UpperCAmelCase__: int = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase__: List[Any] = self.scheduler.step(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , generator=lowerCamelCase__ , )[0]
# post-processing
UpperCAmelCase__: Union[str, Any] = self.movq.decode(lowerCamelCase__ , force_not_quantize=lowerCamelCase__ )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
UpperCAmelCase__: List[str] = image * 0.5 + 0.5
UpperCAmelCase__: int = image.clamp(0 , 1 )
UpperCAmelCase__: Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase__: List[str] = self.numpy_to_pil(lowerCamelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase__ )
| 113
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_lowerCAmelCase : Any =None
_lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
_lowerCAmelCase : Dict ={"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase : Optional[int] ={
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase : List[Any] ={
"""facebook/nllb-large-en-ro""": 10_24,
"""facebook/nllb-200-distilled-600M""": 10_24,
}
# fmt: off
_lowerCAmelCase : Union[str, Any] =["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class __UpperCamelCase ( _a ):
'''simple docstring'''
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = ["input_ids", "attention_mask"]
__magic_name__ = NllbTokenizer
__magic_name__ = []
__magic_name__ = []
def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<mask>" , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=False , **lowerCamelCase__ , ):
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase__: int = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
UpperCAmelCase__: Tuple = legacy_behaviour
super().__init__(
vocab_file=lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , src_lang=lowerCamelCase__ , tgt_lang=lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , legacy_behaviour=lowerCamelCase__ , **lowerCamelCase__ , )
UpperCAmelCase__: Optional[int] = vocab_file
UpperCAmelCase__: Tuple = False if not self.vocab_file else True
UpperCAmelCase__: str = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
UpperCAmelCase__: Optional[int] = {
lang_code: self.convert_tokens_to_ids(lowerCamelCase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
UpperCAmelCase__: List[Any] = src_lang if src_lang is not None else "eng_Latn"
UpperCAmelCase__: Union[str, Any] = self.convert_tokens_to_ids(self._src_lang )
UpperCAmelCase__: List[str] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _UpperCAmelCase ( self ):
return self._src_lang
@src_lang.setter
def _UpperCAmelCase ( self , lowerCamelCase__ ):
UpperCAmelCase__: Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
UpperCAmelCase__: List[Any] = [self.sep_token_id]
UpperCAmelCase__: Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ):
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
UpperCAmelCase__: str = src_lang
UpperCAmelCase__: Any = self(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )
UpperCAmelCase__: List[Any] = self.convert_tokens_to_ids(lowerCamelCase__ )
UpperCAmelCase__: int = tgt_lang_id
return inputs
def _UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ = "eng_Latn" , lowerCamelCase__ = None , lowerCamelCase__ = "fra_Latn" , **lowerCamelCase__ , ):
UpperCAmelCase__: Union[str, Any] = src_lang
UpperCAmelCase__: Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
def _UpperCAmelCase ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def _UpperCAmelCase ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _UpperCAmelCase ( self , lowerCamelCase__ ):
UpperCAmelCase__: int = self.convert_tokens_to_ids(lowerCamelCase__ )
if self.legacy_behaviour:
UpperCAmelCase__: List[str] = []
UpperCAmelCase__: List[Any] = [self.eos_token_id, self.cur_lang_code]
else:
UpperCAmelCase__: Optional[Any] = [self.cur_lang_code]
UpperCAmelCase__: List[Any] = [self.eos_token_id]
UpperCAmelCase__: Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCAmelCase__: Optional[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCAmelCase__: Any = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCAmelCase ( self , lowerCamelCase__ ):
UpperCAmelCase__: Optional[int] = self.convert_tokens_to_ids(lowerCamelCase__ )
if self.legacy_behaviour:
UpperCAmelCase__: int = []
UpperCAmelCase__: str = [self.eos_token_id, self.cur_lang_code]
else:
UpperCAmelCase__: Tuple = [self.cur_lang_code]
UpperCAmelCase__: Tuple = [self.eos_token_id]
UpperCAmelCase__: Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCAmelCase__: Optional[int] = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCAmelCase__: Optional[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowerCamelCase__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory." )
return
UpperCAmelCase__: str = os.path.join(
lowerCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ):
copyfile(self.vocab_file , lowerCamelCase__ )
return (out_vocab_file,)
| 113
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {
'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['AlbertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['AlbertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 713
|
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _SCREAMING_SNAKE_CASE :
def __init__(self , UpperCAmelCase , UpperCAmelCase=3 , UpperCAmelCase=3_2 , UpperCAmelCase=3 , UpperCAmelCase=1_0 , UpperCAmelCase=[1_0, 2_0, 3_0, 4_0] , UpperCAmelCase=[1, 1, 2, 1] , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase="relu" , UpperCAmelCase=3 , UpperCAmelCase=None , ):
'''simple docstring'''
__UpperCAmelCase =parent
__UpperCAmelCase =batch_size
__UpperCAmelCase =image_size
__UpperCAmelCase =num_channels
__UpperCAmelCase =embeddings_size
__UpperCAmelCase =hidden_sizes
__UpperCAmelCase =depths
__UpperCAmelCase =is_training
__UpperCAmelCase =use_labels
__UpperCAmelCase =hidden_act
__UpperCAmelCase =num_labels
__UpperCAmelCase =scope
__UpperCAmelCase =len(UpperCAmelCase)
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__UpperCAmelCase =None
if self.use_labels:
__UpperCAmelCase =ids_tensor([self.batch_size] , self.num_labels)
__UpperCAmelCase =self.get_config()
return config, pixel_values, labels
def A__ (self):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def A__ (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase):
'''simple docstring'''
__UpperCAmelCase =TFRegNetModel(config=UpperCAmelCase)
__UpperCAmelCase =model(UpperCAmelCase , training=UpperCAmelCase)
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def A__ (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase):
'''simple docstring'''
__UpperCAmelCase =self.num_labels
__UpperCAmelCase =TFRegNetForImageClassification(UpperCAmelCase)
__UpperCAmelCase =model(UpperCAmelCase , labels=UpperCAmelCase , training=UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs
__UpperCAmelCase ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a_ : Any = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
a_ : Union[str, Any] = (
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
a_ : str = False
a_ : List[str] = False
a_ : int = False
a_ : List[str] = False
a_ : List[Any] = False
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =TFRegNetModelTester(self)
__UpperCAmelCase =ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase)
def A__ (self):
'''simple docstring'''
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''')
def A__ (self):
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''')) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
def A__ (self):
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason='''RegNet does not support input and output embeddings''')
def A__ (self):
'''simple docstring'''
pass
def A__ (self):
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(UpperCAmelCase)
__UpperCAmelCase =inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase =[*signature.parameters.keys()]
__UpperCAmelCase =['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase)
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase)
def A__ (self):
'''simple docstring'''
def check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase):
__UpperCAmelCase =model_class(UpperCAmelCase)
__UpperCAmelCase =model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase) , training=UpperCAmelCase)
__UpperCAmelCase =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase =self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase) , expected_num_stages + 1)
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
__UpperCAmelCase =layer_type
__UpperCAmelCase =True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase =True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase)
def A__ (self):
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase={}):
__UpperCAmelCase =model(UpperCAmelCase , return_dict=UpperCAmelCase , **UpperCAmelCase)
__UpperCAmelCase =model(UpperCAmelCase , return_dict=UpperCAmelCase , **UpperCAmelCase).to_tuple()
def recursive_check(UpperCAmelCase , UpperCAmelCase):
if isinstance(UpperCAmelCase , (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(UpperCAmelCase , UpperCAmelCase):
recursive_check(UpperCAmelCase , UpperCAmelCase)
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(UpperCAmelCase , UpperCAmelCase)) , msg=(
'''Tuple and dict output are not equal. Difference:'''
f""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"""
) , )
recursive_check(UpperCAmelCase , UpperCAmelCase)
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(UpperCAmelCase)
__UpperCAmelCase =self._prepare_for_class(UpperCAmelCase , UpperCAmelCase)
__UpperCAmelCase =self._prepare_for_class(UpperCAmelCase , UpperCAmelCase)
check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase)
__UpperCAmelCase =self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase)
__UpperCAmelCase =self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase)
check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase)
__UpperCAmelCase =self._prepare_for_class(UpperCAmelCase , UpperCAmelCase)
__UpperCAmelCase =self._prepare_for_class(UpperCAmelCase , UpperCAmelCase)
check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , {'''output_hidden_states''': True})
__UpperCAmelCase =self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase)
__UpperCAmelCase =self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase)
check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , {'''output_hidden_states''': True})
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase)
@slow
def A__ (self):
'''simple docstring'''
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase =TFRegNetModel.from_pretrained(UpperCAmelCase)
self.assertIsNotNone(UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
__UpperCAmelCase =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def A__ (self):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
if is_vision_available()
else None
)
@slow
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
__UpperCAmelCase =self.default_image_processor
__UpperCAmelCase =prepare_img()
__UpperCAmelCase =image_processor(images=UpperCAmelCase , return_tensors='''tf''')
# forward pass
__UpperCAmelCase =model(**UpperCAmelCase , training=UpperCAmelCase)
# verify the logits
__UpperCAmelCase =tf.TensorShape((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , UpperCAmelCase)
__UpperCAmelCase =tf.constant([-0.4180, -1.5051, -3.4836])
tf.debugging.assert_near(outputs.logits[0, :3] , UpperCAmelCase , atol=1e-4)
| 142
| 0
|
import re
from filelock import FileLock
try:
import nltk
a : Optional[int] = True
except (ImportError, ModuleNotFoundError):
a : Union[str, Any] = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
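# Helper below: strips the Pegasus-specific "<n>" newline marker and re-splits the text into one sentence
# per line with nltk (typically done so sentence-level metrics such as ROUGE-Lsum see real sentence boundaries).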
def lowerCamelCase__ ( __lowerCamelCase : str ):
re.sub("""<n>""" , """""" , __lowerCamelCase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__lowerCamelCase ) )
| 63
|
"""simple docstring"""
from math import sqrt
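# Brute-force search: grow the longest cuboid side M one unit at a time and, for each sum s of the two shorter
# sides, count the cuboids whose shortest surface path sqrt(M**2 + s**2) is an integer; return the first M for
# which the running count exceeds the limit.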
def UpperCAmelCase ( A : int = 100_0000 ):
'''simple docstring'''
    num_cuboids = 0
    max_cuboid_size = 0
    sum_shortest_sides = 0
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(max_cuboid_size , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(F'''{solution() = }''')
| 573
| 0
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
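# Builds the (old_key, new_key) pairs that map the original ViT-MSN checkpoint names onto the Hugging Face ViT
# module layout, covering the encoder blocks, the embeddings, and either the bare model or the classifier variant.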
def UpperCamelCase_ ( a_ , a_=False ) ->List[Any]:
A =[]
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''module.blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''module.blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''module.blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''module.blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''module.blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''module.blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A =[(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def UpperCamelCase_ ( a_ , a_ , a_=False ) ->int:
for i in range(config.num_hidden_layers ):
if base_model:
A =""
else:
A ="vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A =state_dict.pop(f'''module.blocks.{i}.attn.qkv.weight''' )
A =state_dict.pop(f'''module.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
A =in_proj_weight[
: config.hidden_size, :
]
A =in_proj_bias[: config.hidden_size]
A =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A =in_proj_weight[
-config.hidden_size :, :
]
A =in_proj_bias[-config.hidden_size :]
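# Drops the classification-head weights, which are not part of the ViT-MSN encoder being converted.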
def UpperCamelCase_ ( a_ ) ->Dict:
A =["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(k , None )
def UpperCamelCase_ ( a_ ) ->int:
# projection head is used in the self-supervised pre-training in MSN,
# for downstream task it's not needed.
A =[
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
for k in ignore_keys:
state_dict.pop(k , None )
def UpperCamelCase_ ( a_ , a_ , a_ ) ->Optional[int]:
A =dct.pop(_lowerCAmelCase )
A =val
def UpperCamelCase_ ( a_ , a_ ) ->Dict:
A =ViTMSNConfig()
A =1000
A ="datasets/huggingface/label-files"
A ="imagenet-1k-id2label.json"
A =json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase ) , "r" ) )
A ={int(k): v for k, v in idalabel.items()}
A =idalabel
A ={v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
A =384
A =1536
A =6
elif "l16" in checkpoint_url:
A =1024
A =4096
A =24
A =16
A =0.1
elif "b4" in checkpoint_url:
A =4
elif "l7" in checkpoint_url:
A =7
A =1024
A =4096
A =24
A =16
A =0.1
A =ViTMSNModel(_lowerCAmelCase )
A =torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="cpu" )["target_encoder"]
A =ViTImageProcessor(size=config.image_size )
remove_projection_head(_lowerCAmelCase )
A =create_rename_keys(_lowerCAmelCase , base_model=_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , base_model=_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
model.eval()
A ="http://images.cocodataset.org/val2017/000000039769.jpg"
A =Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
A =ViTImageProcessor(
size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
A =image_processor(images=_lowerCAmelCase , return_tensors="pt" )
# forward pass
torch.manual_seed(2 )
A =model(**_lowerCAmelCase )
A =outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
A =torch.tensor([[-1.0915, -1.4876, -1.1809]] )
elif "b16" in checkpoint_url:
A =torch.tensor([[14.2889, -18.9045, 11.7281]] )
elif "l16" in checkpoint_url:
A =torch.tensor([[41.5028, -22.8681, 45.6475]] )
elif "b4" in checkpoint_url:
A =torch.tensor([[-4.3868, 5.2932, -0.4137]] )
else:
A =torch.tensor([[-0.1792, -0.6465, 2.4263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , _lowerCAmelCase , atol=1E-4 )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
__a = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 701
|
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
__a = ["""model.decoder.embed_positions.weights"""]
def UpperCamelCase_ ( a_ ) ->List[str]:
if "emb" in name:
A =name.replace("emb" , "model.decoder.embed_tokens" )
if "transformer" in name:
A =name.replace("transformer" , "model.decoder" )
if "cross_attention" in name:
A =name.replace("cross_attention" , "encoder_attn" )
if "linear1" in name:
A =name.replace("linear1" , "fc1" )
if "linear2" in name:
A =name.replace("linear2" , "fc2" )
if "norm1" in name:
A =name.replace("norm1" , "self_attn_layer_norm" )
if "norm_cross" in name:
A =name.replace("norm_cross" , "encoder_attn_layer_norm" )
if "norm2" in name:
A =name.replace("norm2" , "final_layer_norm" )
if "out_norm" in name:
A =name.replace("out_norm" , "model.decoder.layer_norm" )
if "linears" in name:
A =name.replace("linears" , "lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
A =name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
return name
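# Renames every checkpoint key, splits each fused in_proj qkv matrix into separate q/k/v projections,
# and collects the encoder-to-decoder projection weights into their own state dict.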
def UpperCamelCase_ ( a_ , a_ ) ->Tuple[Dict, Dict]:
A =list(state_dict.keys() )
A ={}
for key in keys:
A =state_dict.pop(a_ )
A =rename_keys(a_ )
if "in_proj_weight" in key:
# split fused qkv proj
A =val[:hidden_size, :]
A =val[hidden_size : 2 * hidden_size, :]
A =val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
A =val
else:
A =val
return state_dict, enc_dec_proj_state_dict
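# Selects the decoder hyper-parameters (hidden size, layer count, attention heads) matching the small/medium/large checkpoints.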
def UpperCamelCase_ ( a_ ) ->MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
A =1024
A =24
A =16
elif checkpoint == "medium":
A =1536
A =48
A =24
elif checkpoint == "large":
A =2048
A =48
A =32
else:
raise ValueError(f'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
A =MusicgenDecoderConfig(
hidden_size=a_ , ffn_dim=hidden_size * 4 , num_hidden_layers=a_ , num_attention_heads=a_ , )
return config
@torch.no_grad()
def UpperCamelCase_ ( a_ , a_=None , a_=None , a_="cpu" ) ->Union[str, Any]:
A =MusicGen.get_pretrained(a_ , device=a_ )
A =decoder_config_from_checkpoint(a_ )
A =fairseq_model.lm.state_dict()
A , A =rename_state_dict(
a_ , hidden_size=decoder_config.hidden_size )
A =TaEncoderModel.from_pretrained("t5-base" )
A =EncodecModel.from_pretrained("facebook/encodec_32khz" )
A =MusicgenForCausalLM(a_ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
A , A =decoder.load_state_dict(a_ , strict=a_ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(a_ )
if len(a_ ) > 0:
raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' )
if len(a_ ) > 0:
raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
A =MusicgenForConditionalGeneration(text_encoder=a_ , audio_encoder=a_ , decoder=a_ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(a_ )
# check we can do a forward pass
A =torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
A =input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
A =model(input_ids=a_ , decoder_input_ids=a_ ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
A =AutoTokenizer.from_pretrained("t5-base" )
A =AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
A =MusicgenProcessor(feature_extractor=a_ , tokenizer=a_ )
# set the appropriate bos/pad token ids
A =2048
A =2048
# set other default generation config params
A =int(30 * audio_encoder.config.frame_rate )
A =True
A =3.0
if pytorch_dump_folder is not None:
Path(a_ ).mkdir(exist_ok=a_ )
logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(a_ )
processor.save_pretrained(a_ )
if repo_id:
logger.info(f'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(a_ )
processor.push_to_hub(a_ )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
__a = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 689
| 0
|
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
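# Derives a SwinConfig from the timm model name: the size token (tiny/small/base/large) selects embed_dim, depths
# and attention heads, while the rest of the name encodes window size, image size and the label set (in22k vs imagenet-1k).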
def _A ( lowerCAmelCase_ : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = SwinConfig()
lowerCAmelCase__ = swin_name.split("_" )
lowerCAmelCase__ = name_split[1]
lowerCAmelCase__ = int(name_split[4] )
lowerCAmelCase__ = int(name_split[3][-1] )
if model_size == "tiny":
lowerCAmelCase__ = 96
lowerCAmelCase__ = (2, 2, 6, 2)
lowerCAmelCase__ = (3, 6, 12, 24)
elif model_size == "small":
lowerCAmelCase__ = 96
lowerCAmelCase__ = (2, 2, 18, 2)
lowerCAmelCase__ = (3, 6, 12, 24)
elif model_size == "base":
lowerCAmelCase__ = 128
lowerCAmelCase__ = (2, 2, 18, 2)
lowerCAmelCase__ = (4, 8, 16, 32)
else:
lowerCAmelCase__ = 192
lowerCAmelCase__ = (2, 2, 18, 2)
lowerCAmelCase__ = (6, 12, 24, 48)
if "in22k" in swin_name:
lowerCAmelCase__ = 2_1841
else:
lowerCAmelCase__ = 1000
lowerCAmelCase__ = "huggingface/label-files"
lowerCAmelCase__ = "imagenet-1k-id2label.json"
lowerCAmelCase__ = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="dataset" ) , "r" ) )
lowerCAmelCase__ = {int(k): v for k, v in idalabel.items()}
lowerCAmelCase__ = idalabel
lowerCAmelCase__ = {v: k for k, v in idalabel.items()}
lowerCAmelCase__ = img_size
lowerCAmelCase__ = num_classes
lowerCAmelCase__ = embed_dim
lowerCAmelCase__ = depths
lowerCAmelCase__ = num_heads
lowerCAmelCase__ = window_size
return config
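# Translates a single timm Swin parameter name into the corresponding Hugging Face Swin module path.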
def _A ( lowerCAmelCase_ : int ):
"""simple docstring"""
if "patch_embed.proj" in name:
lowerCAmelCase__ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
lowerCAmelCase__ = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
lowerCAmelCase__ = "encoder." + name
if "attn.proj" in name:
lowerCAmelCase__ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowerCAmelCase__ = name.replace("attn" , "attention.self" )
if "norm1" in name:
lowerCAmelCase__ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowerCAmelCase__ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowerCAmelCase__ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowerCAmelCase__ = name.replace("mlp.fc2" , "output.dense" )
if name == "norm.weight":
lowerCAmelCase__ = "layernorm.weight"
if name == "norm.bias":
lowerCAmelCase__ = "layernorm.bias"
if "head" in name:
lowerCAmelCase__ = name.replace("head" , "classifier" )
else:
lowerCAmelCase__ = "swin." + name
return name
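# Walks the timm state dict, renaming every key and splitting the fused qkv projections into separate query/key/value tensors.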
def _A ( lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowerCAmelCase__ = orig_state_dict.pop(lowerCAmelCase_ )
if "mask" in key:
continue
elif "qkv" in key:
lowerCAmelCase__ = key.split("." )
lowerCAmelCase__ = int(key_split[1] )
lowerCAmelCase__ = int(key_split[3] )
lowerCAmelCase__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowerCAmelCase__ = val[:dim, :]
lowerCAmelCase__ = val[
dim : dim * 2, :
]
lowerCAmelCase__ = val[-dim:, :]
else:
lowerCAmelCase__ = val[
:dim
]
lowerCAmelCase__ = val[
dim : dim * 2
]
lowerCAmelCase__ = val[
-dim:
]
else:
lowerCAmelCase__ = val
return orig_state_dict
def _A ( lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = timm.create_model(lowerCAmelCase_ , pretrained=lowerCAmelCase_ )
timm_model.eval()
lowerCAmelCase__ = get_swin_config(lowerCAmelCase_ )
lowerCAmelCase__ = SwinForImageClassification(lowerCAmelCase_ )
model.eval()
lowerCAmelCase__ = convert_state_dict(timm_model.state_dict() , lowerCAmelCase_ )
model.load_state_dict(lowerCAmelCase_ )
lowerCAmelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_" , "-" ) ) )
lowerCAmelCase__ = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw )
lowerCAmelCase__ = image_processor(images=lowerCAmelCase_ , return_tensors="pt" )
lowerCAmelCase__ = timm_model(inputs["pixel_values"] )
lowerCAmelCase__ = model(**lowerCAmelCase_ ).logits
assert torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 )
print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCAmelCase_ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swin_name',
default='swin_tiny_patch4_window7_224',
type=str,
help='Name of the Swin timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
UpperCamelCase = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 61
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ : str = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : str = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # register the lazy module so top-level imports resolve on demand
| 614
| 0
|
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
a_ = False
class snake_case ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class snake_case ( unittest.TestCase):
def a_ ( self : Optional[int] ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Tuple ) -> Any:
'''simple docstring'''
_A = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
_A = torch.manual_seed(0 )
_A = pipe.dual_guided(
prompt="first prompt" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a__ )
_A = VersatileDiffusionPipeline.from_pretrained(a__ , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = generator.manual_seed(0 )
_A = pipe.dual_guided(
prompt="first prompt" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def a_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
_A = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = "cyberpunk 2077"
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
_A = torch.manual_seed(0 )
_A = pipe.dual_guided(
prompt=a__ , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
_A = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_A = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_A = "A painting of a squirrel eating a burger "
_A = torch.manual_seed(0 )
_A = pipe.text_to_image(
prompt=a__ , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
_A = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_A = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_A = pipe.image_variation(a__ , generator=a__ , output_type="numpy" ).images
_A = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_A = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 621
|
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
a_ = logging.get_logger(__name__)
class snake_case ( _UpperCamelCase):
__UpperCamelCase = ['input_features']
def __init__( self : int , a__ : Optional[Any]=80 , a__ : Optional[int]=1_60_00 , a__ : int=1_60 , a__ : Union[str, Any]=30 , a__ : Tuple=4_00 , a__ : List[Any]=0.0 , a__ : Optional[Any]=False , **a__ : List[Any] , ) -> str:
'''simple docstring'''
super().__init__(
feature_size=a__ , sampling_rate=a__ , padding_value=a__ , return_attention_mask=a__ , **a__ , )
_A = n_fft
_A = hop_length
_A = chunk_length
_A = chunk_length * sampling_rate
_A = self.n_samples // hop_length
_A = sampling_rate
_A = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a__ , min_frequency=0.0 , max_frequency=8_0_0_0.0 , sampling_rate=a__ , norm="slaney" , mel_scale="slaney" , )
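# Computes the log-mel spectrogram of a single waveform: power STFT -> mel filter bank -> log10,
# then clamps to within 8.0 of the maximum and rescales to roughly the [-1, 1] range.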
def a_ ( self : int , a__ : np.array ) -> np.ndarray:
'''simple docstring'''
_A = spectrogram(
a__ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
_A = log_spec[:, :-1]
_A = np.maximum(a__ , log_spec.max() - 8.0 )
_A = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def a_ ( a__ : List[np.ndarray] , a__ : List[np.ndarray] , a__ : float = 0.0 ) -> List[np.ndarray]:
'''simple docstring'''
if attention_mask is not None:
_A = np.array(a__ , np.intaa )
_A = []
for vector, length in zip(a__ , attention_mask.sum(-1 ) ):
_A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
_A = padding_value
normed_input_values.append(a__ )
else:
_A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self : Optional[int] , a__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a__ : bool = True , a__ : Optional[int] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : Optional[bool] = None , a__ : Optional[str] = "max_length" , a__ : Optional[int] = None , a__ : Optional[int] = None , a__ : Optional[bool] = None , **a__ : Dict , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_A = isinstance(a__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
_A = is_batched_numpy or (
isinstance(a__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_A = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(a__ , np.ndarray ):
_A = np.asarray(a__ , dtype=np.floataa )
elif isinstance(a__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_A = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_A = [np.asarray([raw_speech] ).T]
_A = BatchFeature({"input_features": raw_speech} )
# convert into correct format for padding
_A = self.pad(
a__ , padding=a__ , max_length=max_length if max_length else self.n_samples , truncation=a__ , pad_to_multiple_of=a__ , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
_A = self.zero_mean_unit_var_norm(
padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
_A = np.stack(padded_inputs["input_features"] , axis=0 )
# make sure list is in array format
_A = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
_A = [self._np_extract_fbank_features(a__ ) for waveform in input_features[0]]
if isinstance(input_features[0] , a__ ):
_A = [np.asarray(a__ , dtype=np.floataa ) for feature in input_features]
else:
_A = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
_A = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
_A = padded_inputs.convert_to_tensors(a__ )
return padded_inputs
def a_ ( self : Dict ) -> Dict[str, Any]:
'''simple docstring'''
_A = copy.deepcopy(self.__dict__ )
_A = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 621
| 1
|
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
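# Notebook launcher: detects the runtime (Kaggle / Colab / plain notebook), validates the mixed-precision setting,
# then dispatches the training function to TPU cores, to several GPUs via forked subprocesses, or to a single GPU / MPS / CPU.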
def _snake_case (__lowercase , __lowercase=() , __lowercase=None , __lowercase="no" , __lowercase="29500"):
UpperCamelCase_ = False
UpperCamelCase_ = False
if any(key.startswith('KAGGLE') for key in os.environ.keys()):
UpperCamelCase_ = True
elif "IPython" in sys.modules:
UpperCamelCase_ = 'google.colab' in str(sys.modules['IPython'].get_ipython())
try:
UpperCamelCase_ = PrecisionType(mixed_precision.lower())
except ValueError:
raise ValueError(
f"""Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.""")
if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , __lowercase) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
'your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.')
if num_processes is None:
UpperCamelCase_ = 8
UpperCamelCase_ = PrepareForLaunch(__lowercase , distributed_type='TPU')
print(f"""Launching a training on {num_processes} TPU cores.""")
xmp.spawn(__lowercase , args=__lowercase , nprocs=__lowercase , start_method='fork')
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.')
else:
print('Launching training on one CPU.')
function(*__lowercase)
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.')
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.')
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.')
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=__lowercase , master_addr='127.0.01' , master_port=__lowercase , mixed_precision=__lowercase):
UpperCamelCase_ = PrepareForLaunch(__lowercase , distributed_type='MULTI_GPU')
print(f"""Launching training on {num_processes} GPUs.""")
try:
start_processes(__lowercase , args=__lowercase , nprocs=__lowercase , start_method='fork')
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.') from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
UpperCamelCase_ = '1'
print('Launching training on MPS.')
elif torch.cuda.is_available():
print('Launching training on one GPU.')
else:
print('Launching training on CPU.')
function(*__lowercase)
def _snake_case (__lowercase , __lowercase=() , __lowercase=2):
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=__lowercase , master_addr='127.0.01' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
UpperCamelCase_ = PrepareForLaunch(__lowercase , debug=__lowercase)
start_processes(__lowercase , args=__lowercase , nprocs=__lowercase , start_method='fork')
| 23
|
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_lowerCAmelCase :List[str] = 16
_lowerCAmelCase :Any = 32
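# Converts a byte count into mebibytes (2**20 bytes) for the memory reports below.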
def lowerCamelCase_ (UpperCamelCase__ : int ):
return int(x / 2**20 )
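# Context manager that snapshots CUDA memory on enter and exit so the training loop can report the delta and the peak allocation.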
class _UpperCAmelCase :
'''simple docstring'''
def __enter__( self ) -> Union[str, Any]:
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
_UpperCAmelCase : Optional[int] = torch.cuda.memory_allocated()
return self
def __exit__( self , *A ) -> Any:
gc.collect()
torch.cuda.empty_cache()
_UpperCAmelCase : Optional[int] = torch.cuda.memory_allocated()
_UpperCAmelCase : int = torch.cuda.max_memory_allocated()
_UpperCAmelCase : str = bamb(self.end - self.begin )
_UpperCAmelCase : Any = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def lowerCamelCase_ (UpperCamelCase__ : Accelerator , UpperCamelCase__ : int = 16 , UpperCamelCase__ : str = "bert-base-cased" , UpperCamelCase__ : int = 320 , UpperCamelCase__ : int = 160 , ):
_UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase__ )
_UpperCAmelCase : int = load_dataset(
'''glue''' , '''mrpc''' , split={'''train''': F'train[:{n_train}]', '''validation''': F'validation[:{n_val}]'} )
def tokenize_function(UpperCamelCase__ : Tuple ):
# max_length=None => use the model max length (it's actually the default)
_UpperCAmelCase : str = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_UpperCAmelCase : int = datasets.map(
UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=UpperCamelCase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_UpperCAmelCase : Optional[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(UpperCamelCase__ : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCamelCase__ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(UpperCamelCase__ , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
_UpperCAmelCase : Optional[Any] = DataLoader(
tokenized_datasets['''train'''] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ )
_UpperCAmelCase : Any = DataLoader(
tokenized_datasets['''validation'''] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ )
return train_dataloader, eval_dataloader
def lowerCamelCase_ (UpperCamelCase__ : Tuple , UpperCamelCase__ : Any ):
# Initialize accelerator
_UpperCAmelCase : Optional[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCAmelCase : List[str] = config['''lr''']
_UpperCAmelCase : List[Any] = int(config['''num_epochs'''] )
_UpperCAmelCase : Optional[int] = int(config['''seed'''] )
_UpperCAmelCase : Optional[Any] = int(config['''batch_size'''] )
_UpperCAmelCase : Tuple = args.model_name_or_path
set_seed(UpperCamelCase__ )
_UpperCAmelCase , _UpperCAmelCase : str = get_dataloaders(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCAmelCase : Any = AutoModelForSequenceClassification.from_pretrained(UpperCamelCase__ , return_dict=UpperCamelCase__ )
# Instantiate optimizer
_UpperCAmelCase : List[str] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_UpperCAmelCase : Optional[int] = optimizer_cls(params=model.parameters() , lr=UpperCamelCase__ )
if accelerator.state.deepspeed_plugin is not None:
_UpperCAmelCase : Optional[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
_UpperCAmelCase : str = 1
_UpperCAmelCase : List[Any] = (len(UpperCamelCase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_UpperCAmelCase : Optional[int] = get_linear_schedule_with_warmup(
optimizer=UpperCamelCase__ , num_warmup_steps=0 , num_training_steps=UpperCamelCase__ , )
else:
_UpperCAmelCase : str = DummyScheduler(UpperCamelCase__ , total_num_steps=UpperCamelCase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# We need to keep track of how many total steps we have iterated over
_UpperCAmelCase : List[str] = 0
# We also need to keep track of the stating epoch so files are named properly
_UpperCAmelCase : Optional[Any] = 0
# Now we train the model
_UpperCAmelCase : List[str] = {}
for epoch in range(UpperCamelCase__ , UpperCamelCase__ ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(UpperCamelCase__ ):
_UpperCAmelCase : Optional[int] = model(**UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = outputs.loss
_UpperCAmelCase : Tuple = loss / gradient_accumulation_steps
accelerator.backward(UpperCamelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('''Memory before entering the train : {}'''.format(bamb(tracemalloc.begin ) ) )
accelerator.print('''Memory consumed at the end of the train (end-begin): {}'''.format(tracemalloc.used ) )
accelerator.print('''Peak Memory consumed during the train (max-begin): {}'''.format(tracemalloc.peaked ) )
accelerator.print(
'''Total Peak Memory consumed during the train (max): {}'''.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
_UpperCAmelCase : Optional[Any] = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F'epoch-{epoch}'] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , '''peak_memory_utilization.json''' ) , '''w''' ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ():
_UpperCAmelCase : List[Any] = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=UpperCamelCase__ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=UpperCamelCase__ , )
parser.add_argument(
'''--output_dir''' , type=UpperCamelCase__ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--peak_memory_upper_bound''' , type=UpperCamelCase__ , default=UpperCamelCase__ , help='''The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.''' , )
parser.add_argument(
'''--n_train''' , type=UpperCamelCase__ , default=320 , help='''Number of training examples to use.''' , )
parser.add_argument(
'''--n_val''' , type=UpperCamelCase__ , default=160 , help='''Number of validation examples to use.''' , )
parser.add_argument(
'''--num_epochs''' , type=UpperCamelCase__ , default=1 , help='''Number of train epochs.''' , )
_UpperCAmelCase : List[Any] = parser.parse_args()
_UpperCAmelCase : List[str] = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
main()
| 506
| 0
|
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
_snake_case = logging.get_logger(__name__)
_snake_case = {
"tensor(bool)": np.bool_,
"tensor(int8)": np.inta,
"tensor(uint8)": np.uinta,
"tensor(int16)": np.intaa,
"tensor(uint16)": np.uintaa,
"tensor(int32)": np.intaa,
"tensor(uint32)": np.uintaa,
"tensor(int64)": np.intaa,
"tensor(uint64)": np.uintaa,
"tensor(float16)": np.floataa,
"tensor(float)": np.floataa,
"tensor(double)": np.floataa,
}
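# Thin wrapper around an onnxruntime InferenceSession exposing a diffusers-style API: __call__ runs inference on
# numpy inputs, and save_pretrained / from_pretrained handle local directories as well as Hub downloads.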
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self , _UpperCAmelCase=None , **_UpperCAmelCase):
logger.info('''`diffusers.OnnxRuntimeModel` is experimental and might change in the future.''')
lowerCAmelCase_ = model
lowerCAmelCase_ = kwargs.get('''model_save_dir''' , _UpperCAmelCase)
lowerCAmelCase_ = kwargs.get('''latest_model_name''' , _UpperCAmelCase)
def __call__( self , **_UpperCAmelCase):
lowerCAmelCase_ = {k: np.array(v) for k, v in kwargs.items()}
return self.model.run(None , lowerCAmelCase_)
@staticmethod
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None):
if provider is None:
logger.info('''No onnxruntime provider specified, using CPUExecutionProvider''')
lowerCAmelCase_ = '''CPUExecutionProvider'''
return ort.InferenceSession(_UpperCAmelCase , providers=[provider] , sess_options=_UpperCAmelCase)
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase):
lowerCAmelCase_ = file_name if file_name is not None else ONNX_WEIGHTS_NAME
lowerCAmelCase_ = self.model_save_dir.joinpath(self.latest_model_name)
lowerCAmelCase_ = Path(_UpperCAmelCase).joinpath(_UpperCAmelCase)
try:
shutil.copyfile(_UpperCAmelCase , _UpperCAmelCase)
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
lowerCAmelCase_ = self.model_save_dir.joinpath(_UpperCAmelCase)
if src_path.exists():
lowerCAmelCase_ = Path(_UpperCAmelCase).joinpath(_UpperCAmelCase)
try:
shutil.copyfile(_UpperCAmelCase , _UpperCAmelCase)
except shutil.SameFileError:
pass
def lowercase__ ( self , _UpperCAmelCase , **_UpperCAmelCase , ):
if os.path.isfile(_UpperCAmelCase):
logger.error(f'Provided path ({save_directory}) should be a directory, not a file')
return
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase)
# saving model weights/files
self._save_pretrained(_UpperCAmelCase , **_UpperCAmelCase)
@classmethod
def lowercase__ ( cls , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
lowerCAmelCase_ = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(_UpperCAmelCase):
lowerCAmelCase_ = OnnxRuntimeModel.load_model(
os.path.join(_UpperCAmelCase , _UpperCAmelCase) , provider=_UpperCAmelCase , sess_options=_UpperCAmelCase)
lowerCAmelCase_ = Path(_UpperCAmelCase)
# load model from hub
else:
# download model
lowerCAmelCase_ = hf_hub_download(
repo_id=_UpperCAmelCase , filename=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , revision=_UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , )
lowerCAmelCase_ = Path(_UpperCAmelCase).parent
lowerCAmelCase_ = Path(_UpperCAmelCase).name
lowerCAmelCase_ = OnnxRuntimeModel.load_model(_UpperCAmelCase , provider=_UpperCAmelCase , sess_options=_UpperCAmelCase)
return cls(model=_UpperCAmelCase , **_UpperCAmelCase)
@classmethod
def lowercase__ ( cls , _UpperCAmelCase , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
lowerCAmelCase_ = None
if len(str(_UpperCAmelCase).split('''@''')) == 2:
lowerCAmelCase_ , lowerCAmelCase_ = model_id.split('''@''')
return cls._from_pretrained(
model_id=_UpperCAmelCase , revision=_UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , **_UpperCAmelCase , )
| 413
|
from __future__ import annotations
from collections.abc import Iterator
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self , _UpperCAmelCase):
lowerCAmelCase_ = value
lowerCAmelCase_ = None
lowerCAmelCase_ = None
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self , _UpperCAmelCase):
lowerCAmelCase_ = tree
def lowercase__ ( self , _UpperCAmelCase):
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left) + self.depth_first_search(node.right)
)
def __iter__( self):
yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 413
| 1
|
'''simple docstring'''
import heapq
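# Greedy approximation of minimum vertex cover: repeatedly pick the highest-degree vertex (max-heap via negated ranks)
# and remove its incident edges until no edges remain.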
def _lowercase ( lowerCamelCase__ ) -> set[int]:
"""simple docstring"""
__UpperCAmelCase : list[list] = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(lowerCamelCase__ , [-1 * len(lowerCamelCase__ ), (key, value)] )
# chosen_vertices = set of chosen vertices
__UpperCAmelCase : int = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
__UpperCAmelCase : Optional[int] = heapq.heappop(lowerCamelCase__ )[1][0]
chosen_vertices.add(lowerCamelCase__ )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
__UpperCAmelCase : List[str] = elem[1][1].index(lowerCamelCase__ )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(lowerCamelCase__ )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_a : Union[str, Any] = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 168
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __A (unittest.TestCase ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_=7 , UpperCamelCase_=3 , UpperCamelCase_=30 , UpperCamelCase_=4_00 , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=0.9 , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=[0.5, 0.5, 0.5] , ):
__UpperCAmelCase : Tuple = size if size is not None else {"shortest_edge": 30}
__UpperCAmelCase : Optional[int] = crop_size if crop_size is not None else {"height": 30, "width": 30}
__UpperCAmelCase : Optional[Any] = parent
__UpperCAmelCase : Optional[Any] = batch_size
__UpperCAmelCase : Optional[Any] = num_channels
__UpperCAmelCase : List[Any] = min_resolution
__UpperCAmelCase : Union[str, Any] = max_resolution
__UpperCAmelCase : Optional[int] = do_resize_and_center_crop
__UpperCAmelCase : Any = size
__UpperCAmelCase : Dict = crop_pct
__UpperCAmelCase : Optional[Any] = crop_size
__UpperCAmelCase : Optional[int] = do_normalize
__UpperCAmelCase : Union[str, Any] = image_mean
__UpperCAmelCase : List[str] = image_std
def _snake_case ( self ):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __A (__magic_name__ , unittest.TestCase ):
snake_case :Optional[Any] = PoolFormerImageProcessor if is_vision_available() else None
def _snake_case ( self ):
__UpperCAmelCase : str = PoolFormerImageProcessingTester(self )
@property
def _snake_case ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , "do_resize_and_center_crop" ) )
self.assertTrue(hasattr(UpperCamelCase_ , "size" ) )
self.assertTrue(hasattr(UpperCamelCase_ , "crop_pct" ) )
self.assertTrue(hasattr(UpperCamelCase_ , "do_normalize" ) )
self.assertTrue(hasattr(UpperCamelCase_ , "image_mean" ) )
self.assertTrue(hasattr(UpperCamelCase_ , "image_std" ) )
def _snake_case ( self ):
__UpperCAmelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 30} )
self.assertEqual(image_processor.crop_size , {"height": 30, "width": 30} )
__UpperCAmelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def _snake_case ( self ):
pass
def _snake_case ( self ):
# Initialize image_processing
__UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
__UpperCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__UpperCAmelCase : Optional[Any] = image_processing(UpperCamelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _snake_case ( self ):
# Initialize image_processing
__UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
# Test not batched input
__UpperCAmelCase : Any = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__UpperCAmelCase : Union[str, Any] = image_processing(UpperCamelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _snake_case ( self ):
# Initialize image_processing
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
__UpperCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__UpperCAmelCase : int = image_processing(UpperCamelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 168
| 1
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
@slow
def _lowerCamelCase ( self :Dict ) -> str:
__UpperCamelCase : List[str] = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" )
__UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained("google/mt5-small" )
__UpperCamelCase : List[Any] = tokenizer("Hello there" , return_tensors="np" ).input_ids
__UpperCamelCase : str = tokenizer("Hi I am" , return_tensors="np" ).input_ids
__UpperCamelCase : Any = shift_tokens_right(a , model.config.pad_token_id , model.config.decoder_start_token_id )
__UpperCamelCase : List[Any] = model(a , decoder_input_ids=a ).logits
__UpperCamelCase : List[str] = optax.softmax_cross_entropy(a , onehot(a , logits.shape[-1] ) ).mean()
__UpperCamelCase : Optional[Any] = -(labels.shape[-1] * loss.item())
__UpperCamelCase : Optional[int] = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 94
|
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
lowercase : List[str] = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
    args = parser.parse_args()
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')
    imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
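# Illustrative invocation added for clarity; the script filename below is an assumption,
# while the two flags come from the argparse definition above:
#   python convert_txt2img_unclip_to_image_variation.py \
#       --dump_path ./unclip-image-variation \
#       --txt2img_unclip kakaobrain/karlo-v1-alpha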
| 94
| 1
|
'''simple docstring'''
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f'{max_subarray_sum(nums) = }')
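    # Illustrative sanity checks for the Kadane implementation above; added for clarity
    # and not part of the original file. The values can be verified by hand.
    assert max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6  # best subarray is [4, -1, 2, 1]
    assert max_subarray_sum([-1, -2]) == -1  # at least one element must be taken
    assert max_subarray_sum([-1, -2], allow_empty_subarrays=True) == 0  # empty subarray allowed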
| 494
|
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
__a = ''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
class A__ ( tr.AbstractTransform ):
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCAmelCase__ : str = " " ) -> str:
"""simple docstring"""
_UpperCAmelCase : Dict = sentence_delimiter
def _lowerCAmelCase ( self : List[Any] , lowerCAmelCase__ : str ) -> Dict:
"""simple docstring"""
return list(lowerCAmelCase__ )
def _lowerCAmelCase ( self : Optional[int] , lowerCAmelCase__ : List[str] ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : str = []
for sent_idx, sentence in enumerate(lowerCAmelCase__ ):
chars.extend(self.process_string(lowerCAmelCase__ ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowerCAmelCase__ ) - 1:
chars.append(self.sentence_delimiter )
return chars
__a = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
__a = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
__a = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
__a = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
__a = '\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> cer = datasets.load_metric("cer")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
"""simple docstring"""
def _lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
"https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
] , )
def _lowerCAmelCase ( self : Any , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Union[str, Any]=False ) -> Optional[int]:
"""simple docstring"""
if concatenate_texts:
return jiwer.compute_measures(
lowerCAmelCase__ , lowerCAmelCase__ , truth_transform=lowerCAmelCase__ , hypothesis_transform=lowerCAmelCase__ , )["wer"]
_UpperCAmelCase : List[str] = 0
_UpperCAmelCase : Optional[Any] = 0
for prediction, reference in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase : List[str] = jiwer.compute_measures(
lowerCAmelCase__ , lowerCAmelCase__ , truth_transform=lowerCAmelCase__ , hypothesis_transform=lowerCAmelCase__ , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 494
| 1
|
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _a ( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'variance_type': 'fixed_small',
            'clip_sample': True,
        }
        config.update(**kwargs)
        return config
def __a ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase_ )
def __a ( self ):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] ,[0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCAmelCase_ ,beta_end=lowerCAmelCase_ )
def __a ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase_ )
def __a ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowerCAmelCase_ )
def __a ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase_ )
def __a ( self ):
self.check_over_configs(thresholding=lowerCAmelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase_ ,prediction_type=lowerCAmelCase_ ,sample_max_value=lowerCAmelCase_ ,)
def __a ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase_ )
def __a ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=lowerCAmelCase_ )
def __a ( self ):
SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**lowerCAmelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def __a ( self ):
SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = len(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_model()
SCREAMING_SNAKE_CASE : Any = self.dummy_sample_deter
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE : int = model(lowerCAmelCase_ ,lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE : Dict = scheduler.step(lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,generator=lowerCAmelCase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
SCREAMING_SNAKE_CASE : Optional[int] = pred_prev_sample
SCREAMING_SNAKE_CASE : str = torch.sum(torch.abs(lowerCAmelCase_ ) )
SCREAMING_SNAKE_CASE : Any = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def __a ( self ):
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config(prediction_type='v_prediction' )
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler_class(**lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = len(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE : Dict = self.dummy_sample_deter
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE : Tuple = model(lowerCAmelCase_ ,lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE : Optional[int] = scheduler.step(lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,generator=lowerCAmelCase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
SCREAMING_SNAKE_CASE : Optional[int] = pred_prev_sample
SCREAMING_SNAKE_CASE : Tuple = torch.sum(torch.abs(lowerCAmelCase_ ) )
SCREAMING_SNAKE_CASE : Any = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def __a ( self ):
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : int = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : str = scheduler_class(**lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = scheduler.timesteps
for i, timestep in enumerate(lowerCAmelCase_ ):
if i == len(lowerCAmelCase_ ) - 1:
SCREAMING_SNAKE_CASE : str = -1
else:
SCREAMING_SNAKE_CASE : int = timesteps[i + 1]
SCREAMING_SNAKE_CASE : List[str] = scheduler.previous_timestep(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = prev_t.item()
self.assertEqual(lowerCAmelCase_ ,lowerCAmelCase_ )
def __a ( self ):
SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = [100, 87, 50, 51, 0]
with self.assertRaises(lowerCAmelCase_ ,msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
def __a ( self ):
SCREAMING_SNAKE_CASE : Any = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = [100, 87, 50, 1, 0]
SCREAMING_SNAKE_CASE : Tuple = len(lowerCAmelCase_ )
with self.assertRaises(lowerCAmelCase_ ,msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=lowerCAmelCase_ ,timesteps=lowerCAmelCase_ )
def __a ( self ):
SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : int = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : str = scheduler_class(**lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            lowerCAmelCase_ ,msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' ,):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
| 719
|
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
__UpperCAmelCase = logging.get_logger(__name__)
class _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
A = ['pixel_values']
def __init__( self ,__SCREAMING_SNAKE_CASE = True ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR ,__SCREAMING_SNAKE_CASE = True ,__SCREAMING_SNAKE_CASE = 1 / 255 ,__SCREAMING_SNAKE_CASE = True ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = True ,**__SCREAMING_SNAKE_CASE ,):
super().__init__(**__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Tuple = size if size is not None else {'shortest_edge': 224}
SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(__SCREAMING_SNAKE_CASE ,default_to_square=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Any = crop_size if crop_size is not None else {'height': 256, 'width': 256}
SCREAMING_SNAKE_CASE : str = get_size_dict(__SCREAMING_SNAKE_CASE ,param_name='crop_size' )
SCREAMING_SNAKE_CASE : str = do_resize
SCREAMING_SNAKE_CASE : Dict = size
SCREAMING_SNAKE_CASE : int = resample
SCREAMING_SNAKE_CASE : List[str] = do_rescale
SCREAMING_SNAKE_CASE : List[Any] = rescale_factor
SCREAMING_SNAKE_CASE : Optional[Any] = do_center_crop
SCREAMING_SNAKE_CASE : Any = crop_size
SCREAMING_SNAKE_CASE : List[str] = do_flip_channel_order
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = PIL.Image.BILINEAR ,__SCREAMING_SNAKE_CASE = None ,**__SCREAMING_SNAKE_CASE ,):
SCREAMING_SNAKE_CASE : int = get_size_dict(__SCREAMING_SNAKE_CASE ,default_to_square=__SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE : str = get_resize_output_image_size(__SCREAMING_SNAKE_CASE ,size=size['shortest_edge'] ,default_to_square=__SCREAMING_SNAKE_CASE )
return resize(__SCREAMING_SNAKE_CASE ,size=__SCREAMING_SNAKE_CASE ,resample=__SCREAMING_SNAKE_CASE ,data_format=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None ,**__SCREAMING_SNAKE_CASE ,):
SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(__SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(__SCREAMING_SNAKE_CASE ,size=(size['height'], size['width']) ,data_format=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None ,**__SCREAMING_SNAKE_CASE ,):
return rescale(__SCREAMING_SNAKE_CASE ,scale=__SCREAMING_SNAKE_CASE ,data_format=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None ):
return flip_channel_order(__SCREAMING_SNAKE_CASE ,data_format=__SCREAMING_SNAKE_CASE )
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = ChannelDimension.FIRST ,**__SCREAMING_SNAKE_CASE ,):
SCREAMING_SNAKE_CASE : Tuple = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : Dict = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : int = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE : Union[str, Any] = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
SCREAMING_SNAKE_CASE : Optional[int] = size if size is not None else self.size
SCREAMING_SNAKE_CASE : Dict = get_size_dict(__SCREAMING_SNAKE_CASE ,default_to_square=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[str] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE : Tuple = get_size_dict(__SCREAMING_SNAKE_CASE ,param_name='crop_size' )
SCREAMING_SNAKE_CASE : Union[str, Any] = make_list_of_images(__SCREAMING_SNAKE_CASE )
if not valid_images(__SCREAMING_SNAKE_CASE ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Optional[Any] = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : Tuple = [self.resize(image=__SCREAMING_SNAKE_CASE ,size=__SCREAMING_SNAKE_CASE ,resample=__SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE : List[str] = [self.center_crop(image=__SCREAMING_SNAKE_CASE ,size=__SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : Dict = [self.rescale(image=__SCREAMING_SNAKE_CASE ,scale=__SCREAMING_SNAKE_CASE ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
SCREAMING_SNAKE_CASE : Optional[int] = [self.flip_channel_order(image=__SCREAMING_SNAKE_CASE ) for image in images]
SCREAMING_SNAKE_CASE : Dict = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) for image in images]
SCREAMING_SNAKE_CASE : Optional[int] = {'pixel_values': images}
return BatchFeature(data=__SCREAMING_SNAKE_CASE ,tensor_type=__SCREAMING_SNAKE_CASE )
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None ):
SCREAMING_SNAKE_CASE : Tuple = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(__SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE : Tuple = target_sizes.numpy()
SCREAMING_SNAKE_CASE : Optional[Any] = []
for idx in range(len(__SCREAMING_SNAKE_CASE ) ):
SCREAMING_SNAKE_CASE : Optional[Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode='bilinear' ,align_corners=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : str = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__SCREAMING_SNAKE_CASE )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 220
| 0
|
"""simple docstring"""
def is_sum_subset(arr: list, required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero (0) can be formed by not taking any element,
    # hence True
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty, then False
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
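    # Illustrative checks for the subset-sum DP above; added for clarity and not part of
    # the original file. Both results can be verified by hand.
    assert is_sum_subset([3, 34, 4, 12, 5, 2], 9)       # 4 + 5 == 9
    assert not is_sum_subset([3, 34, 4, 12, 5, 2], 30)  # no subset sums to 30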
| 260
|
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
class snake_case_ ( _lowerCamelCase ):
"""simple docstring"""
def _UpperCAmelCase ( self , __a ):
"""simple docstring"""
if isinstance(__a , __a ):
A__ = [label.strip() for label in labels.split(',' ) if label.strip()]
return labels
def __call__( self , __a , __a , __a ):
"""simple docstring"""
if len(__a ) == 0 or len(__a ) == 0:
raise ValueError('You must include at least one label and at least one sequence.' )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
'Make sure the passed template includes formatting syntax such as {{}} where the label should go.'
).format(__a ) )
if isinstance(__a , __a ):
A__ = [sequences]
A__ = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(__a )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(_lowerCamelCase )
class snake_case_ ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self , __a=ZeroShotClassificationArgumentHandler() , *__a , **__a ):
"""simple docstring"""
A__ = args_parser
super().__init__(*__a , **__a )
if self.entailment_id == -1:
logger.warning(
'Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '
'-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.' )
@property
def _UpperCAmelCase ( self ):
"""simple docstring"""
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith('entail' ):
return ind
return -1
def _UpperCAmelCase ( self , __a , __a=True , __a=True , __a=TruncationStrategy.ONLY_FIRST , **__a ):
"""simple docstring"""
A__ = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
'Tokenizer was not supporting padding necessary for zero-shot, attempting to use '
' `pad_token=eos_token`' )
A__ = self.tokenizer.eos_token
try:
A__ = self.tokenizer(
__a , add_special_tokens=__a , return_tensors=__a , padding=__a , truncation=__a , )
except Exception as e:
if "too short" in str(__a ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
A__ = self.tokenizer(
__a , add_special_tokens=__a , return_tensors=__a , padding=__a , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def _UpperCAmelCase ( self , **__a ):
"""simple docstring"""
if kwargs.get('multi_class' , __a ) is not None:
A__ = kwargs['multi_class']
logger.warning(
'The `multi_class` argument has been deprecated and renamed to `multi_label`. '
'`multi_class` will be removed in a future version of Transformers.' )
A__ = {}
if "candidate_labels" in kwargs:
A__ = self._args_parser._parse_labels(kwargs['candidate_labels'] )
if "hypothesis_template" in kwargs:
A__ = kwargs['hypothesis_template']
A__ = {}
if "multi_label" in kwargs:
A__ = kwargs['multi_label']
return preprocess_params, {}, postprocess_params
def __call__( self , __a , *__a , **__a , ):
"""simple docstring"""
if len(__a ) == 0:
pass
elif len(__a ) == 1 and "candidate_labels" not in kwargs:
A__ = args[0]
else:
raise ValueError(f'''Unable to understand extra arguments {args}''' )
return super().__call__(__a , **__a )
def _UpperCAmelCase ( self , __a , __a=None , __a="This example is {}." ):
"""simple docstring"""
A__ , A__ = self._args_parser(__a , __a , __a )
for i, (candidate_label, sequence_pair) in enumerate(zip(__a , __a ) ):
A__ = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(__a ) - 1,
**model_input,
}
def _UpperCAmelCase ( self , __a ):
"""simple docstring"""
A__ = inputs['candidate_label']
A__ = inputs['sequence']
A__ = {k: inputs[k] for k in self.tokenizer.model_input_names}
A__ = self.model(**__a )
A__ = {
'candidate_label': candidate_label,
'sequence': sequence,
'is_last': inputs['is_last'],
**outputs,
}
return model_outputs
def _UpperCAmelCase ( self , __a , __a=False ):
"""simple docstring"""
A__ = [outputs['candidate_label'] for outputs in model_outputs]
A__ = [outputs['sequence'] for outputs in model_outputs]
A__ = np.concatenate([output['logits'].numpy() for output in model_outputs] )
A__ = logits.shape[0]
A__ = len(__a )
A__ = N // n
A__ = logits.reshape((num_sequences, n, -1) )
if multi_label or len(__a ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
A__ = self.entailment_id
A__ = -1 if entailment_id == 0 else 0
A__ = reshaped_outputs[..., [contradiction_id, entailment_id]]
A__ = np.exp(__a ) / np.exp(__a ).sum(-1 , keepdims=__a )
A__ = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
A__ = reshaped_outputs[..., self.entailment_id]
A__ = np.exp(__a ) / np.exp(__a ).sum(-1 , keepdims=__a )
A__ = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
| 260
| 1
|
"""simple docstring"""
from __future__ import annotations
def A_ ( n: int ) -> list[int]:
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
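    # Illustrative checks for the trial-division factorisation above; added for clarity
    # and not part of the original file.
    assert A_(100) == [2, 2, 5, 5]
    assert A_(97) == [97]  # a prime maps to itself
    assert A_(1) == []     # 1 has no prime factors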
| 714
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
SCREAMING_SNAKE_CASE__ : Tuple = TypeVar("T")
class A_ ( Generic[T] ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ) -> None:
a : Any | T = None
a : int = len(__UpperCAmelCase )
a : list[T] = [any_type for _ in range(self.N )] + arr
a : str = fnc
self.build()
def lowercase_ ( self ) -> None:
for p in range(self.N - 1 , 0 , -1 ):
a : Optional[Any] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def lowercase_ ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None:
p += self.N
a : str = v
while p > 1:
a : str = p // 2
a : Optional[Any] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def lowercase_ ( self , __UpperCAmelCase , __UpperCAmelCase ) -> T | None: # noqa: E741
a , a : Dict = l + self.N, r + self.N
a : T | None = None
while l <= r:
if l % 2 == 1:
a : Any = self.st[l] if res is None else self.fn(__UpperCAmelCase , self.st[l] )
if r % 2 == 0:
a : Optional[Any] = self.st[r] if res is None else self.fn(__UpperCAmelCase , self.st[r] )
a , a : int = (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
SCREAMING_SNAKE_CASE__ : List[Any] = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
SCREAMING_SNAKE_CASE__ : str = SegmentTree(test_array, min)
SCREAMING_SNAKE_CASE__ : Optional[int] = SegmentTree(test_array, max)
SCREAMING_SNAKE_CASE__ : int = SegmentTree(test_array, lambda a, b: a + b)
def A_ ( ) -> None:
for i in range(len(UpperCAmelCase__ ) ):
for j in range(UpperCAmelCase__ , len(UpperCAmelCase__ ) ):
a : List[str] = reduce(UpperCAmelCase__ , test_array[i : j + 1] )
a : List[Any] = reduce(UpperCAmelCase__ , test_array[i : j + 1] )
a : str = reduce(lambda UpperCAmelCase__ , UpperCAmelCase__ : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(UpperCAmelCase__ , UpperCAmelCase__ )
assert max_range == max_segment_tree.query(UpperCAmelCase__ , UpperCAmelCase__ )
assert sum_range == sum_segment_tree.query(UpperCAmelCase__ , UpperCAmelCase__ )
test_all_segments()
for index, value in test_updates.items():
SCREAMING_SNAKE_CASE__ : Optional[Any] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 509
| 0
|
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=7 , lowerCamelCase__=3 , lowerCamelCase__=18 , lowerCamelCase__=30 , lowerCamelCase__=400 , lowerCamelCase__=True , lowerCamelCase__=None , lowerCamelCase__=True , ) -> int:
lowercase__ : int = size if size is not None else {"""height""": 18, """width""": 18}
lowercase__ : Optional[Any] = parent
lowercase__ : Dict = batch_size
lowercase__ : Union[str, Any] = num_channels
lowercase__ : Tuple = image_size
lowercase__ : str = min_resolution
lowercase__ : Optional[Any] = max_resolution
lowercase__ : Union[str, Any] = do_resize
lowercase__ : Dict = size
lowercase__ : Optional[Any] = do_normalize
def UpperCAmelCase__( self ) -> Any:
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866_4436_3403_3203, 0.6618_8293_6954_4983, 0.3891_7464_0178_6804],
[-0.6042_5591_4688_1104, -0.0_2295_0088_6052_8469, 0.5423_7973_6900_3296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
_a : List[Any] = ImageGPTImageProcessor if is_vision_available() else None
def UpperCAmelCase__( self ) -> Optional[int]:
lowercase__ : str = ImageGPTImageProcessingTester(self )
@property
def UpperCAmelCase__( self ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__( self ) -> int:
lowercase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ , """clusters""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """size""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """do_normalize""" ) )
def UpperCAmelCase__( self ) -> Any:
lowercase__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
lowercase__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def UpperCAmelCase__( self ) -> List[str]:
lowercase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
lowercase__ : Optional[Any] = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCamelCase__ , obj[key] ) )
else:
self.assertEqual(obj[key] , lowerCamelCase__ )
def UpperCAmelCase__( self ) -> Dict:
lowercase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : Optional[int] = os.path.join(lowerCamelCase__ , """image_processor.json""" )
image_processor_first.to_json_file(lowerCamelCase__ )
lowercase__ : str = self.image_processing_class.from_json_file(lowerCamelCase__ ).to_dict()
lowercase__ : Optional[int] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCamelCase__ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCamelCase__ )
def UpperCAmelCase__( self ) -> List[str]:
lowercase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowerCamelCase__ )
lowercase__ : Union[str, Any] = self.image_processing_class.from_pretrained(lowerCamelCase__ ).to_dict()
lowercase__ : Dict = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCamelCase__ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCamelCase__ )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def UpperCAmelCase__( self ) -> Dict:
pass
def _lowerCamelCase ( ):
lowercase__ : Tuple = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" )
lowercase__ : Optional[int] = Image.open(dataset[4]["""file"""] )
lowercase__ : Union[str, Any] = Image.open(dataset[5]["""file"""] )
lowercase__ : Optional[int] = [imagea, imagea]
return images
@require_vision
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__( self ) -> str:
lowercase__ : Optional[int] = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
lowercase__ : int = prepare_images()
# test non-batched
lowercase__ : int = image_processing(images[0] , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1024) )
lowercase__ : Any = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowerCamelCase__ )
# test batched
lowercase__ : str = image_processing(lowerCamelCase__ , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1024) )
lowercase__ : Optional[int] = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowerCamelCase__ )
| 200
|
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
__snake_case = 'src/transformers'
__snake_case = 'docs/source/en'
__snake_case = '.'
def _lowerCamelCase ( lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Any ):
with open(lowerCamelCase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowercase__ : List[str] = f.readlines()
# Find the start prompt.
lowercase__ : str = 0
while not lines[start_index].startswith(lowerCamelCase__ ):
start_index += 1
start_index += 1
lowercase__ : Optional[Any] = start_index
while not lines[end_index].startswith(lowerCamelCase__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
__snake_case = 'Model|Encoder|Decoder|ForConditionalGeneration'
# Regexes that match TF/Flax/PT model names.
__snake_case = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
__snake_case = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
__snake_case = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# This is to make sure the transformers module imported is the one in the repo.
__snake_case = direct_transformers_import(TRANSFORMERS_PATH)
def _lowerCamelCase ( lowerCamelCase__ : List[Any] ):
lowercase__ : Union[str, Any] = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , lowerCamelCase__ )
return [m.group(0 ) for m in matches]
def _lowerCamelCase ( lowerCamelCase__ : Optional[int] , lowerCamelCase__ : int ):
lowercase__ : Any = 2 if text == """✅""" or text == """❌""" else len(lowerCamelCase__ )
lowercase__ : int = (width - text_length) // 2
lowercase__ : List[str] = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def _lowerCamelCase ( ):
lowercase__ : List[str] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
lowercase__ : Optional[int] = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
lowercase__ : str = {name: config.replace("""Config""" , """""" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
lowercase__ : Tuple = collections.defaultdict(lowerCamelCase__ )
lowercase__ : Optional[Any] = collections.defaultdict(lowerCamelCase__ )
lowercase__ : Optional[int] = collections.defaultdict(lowerCamelCase__ )
lowercase__ : Dict = collections.defaultdict(lowerCamelCase__ )
lowercase__ : Union[str, Any] = collections.defaultdict(lowerCamelCase__ )
# Let's lookup through all transformers object (once).
for attr_name in dir(lowerCamelCase__ ):
lowercase__ : Optional[Any] = None
if attr_name.endswith("""Tokenizer""" ):
lowercase__ : List[Any] = slow_tokenizers
lowercase__ : List[str] = attr_name[:-9]
elif attr_name.endswith("""TokenizerFast""" ):
lowercase__ : Tuple = fast_tokenizers
lowercase__ : str = attr_name[:-13]
elif _re_tf_models.match(lowerCamelCase__ ) is not None:
lowercase__ : Tuple = tf_models
lowercase__ : List[Any] = _re_tf_models.match(lowerCamelCase__ ).groups()[0]
elif _re_flax_models.match(lowerCamelCase__ ) is not None:
lowercase__ : List[Any] = flax_models
lowercase__ : List[Any] = _re_flax_models.match(lowerCamelCase__ ).groups()[0]
elif _re_pt_models.match(lowerCamelCase__ ) is not None:
lowercase__ : Union[str, Any] = pt_models
lowercase__ : Optional[int] = _re_pt_models.match(lowerCamelCase__ ).groups()[0]
if lookup_dict is not None:
while len(lowerCamelCase__ ) > 0:
if attr_name in model_name_to_prefix.values():
lowercase__ : Tuple = True
break
# Try again after removing the last word in the name
lowercase__ : Union[str, Any] = """""".join(camel_case_split(lowerCamelCase__ )[:-1] )
# Let's build that table!
lowercase__ : Union[str, Any] = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
lowercase__ : List[Any] = ["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
lowercase__ : str = [len(lowerCamelCase__ ) + 2 for c in columns]
lowercase__ : Tuple = max([len(lowerCamelCase__ ) for name in model_names] ) + 2
# Build the table per se
lowercase__ : List[Any] = """|""" + """|""".join([_center_text(lowerCamelCase__ , lowerCamelCase__ ) for c, w in zip(lowerCamelCase__ , lowerCamelCase__ )] ) + """|\n"""
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n"
lowercase__ : List[Any] = {True: """✅""", False: """❌"""}
for name in model_names:
lowercase__ : int = model_name_to_prefix[name]
lowercase__ : List[str] = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(lowerCamelCase__ , lowerCamelCase__ ) for l, w in zip(lowerCamelCase__ , lowerCamelCase__ )] ) + "|\n"
return table
def _lowerCamelCase ( lowerCamelCase__ : Union[str, Any]=False ):
lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = _find_text_in_file(
filename=os.path.join(lowerCamelCase__ , """index.md""" ) , start_prompt="""<!--This table is updated automatically from the auto modules""" , end_prompt="""<!-- End table-->""" , )
lowercase__ : Union[str, Any] = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(lowerCamelCase__ , """index.md""" ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"""The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
__snake_case = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 200
| 1
|
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _lowerCamelCase( _a ):
lowercase_ : Optional[int] = """Salesforce/blip-image-captioning-base"""
lowercase_ : List[Any] = (
"""This is a tool that generates a description of an image. It takes an input named `image` which should be the """
"""image to caption, and returns a text that contains the description in English."""
)
lowercase_ : Any = """image_captioner"""
lowercase_ : List[Any] = AutoModelForVisionaSeq
lowercase_ : Union[str, Any] = ["""image"""]
lowercase_ : Union[str, Any] = ["""text"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(self, ['vision'])
super().__init__(*__UpperCamelCase, **__UpperCamelCase)
def UpperCamelCase ( self, lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
return self.pre_processor(images=__UpperCamelCase, return_tensors='pt')
def UpperCamelCase ( self, lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
return self.model.generate(**__UpperCamelCase)
def UpperCamelCase ( self, lowerCamelCase) -> int:
"""simple docstring"""
return self.pre_processor.batch_decode(__UpperCamelCase, skip_special_tokens=__UpperCamelCase)[0].strip()
| 707
|
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE : Optional[Any] = logging.getLogger()
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
_lowercase : List[Any] = '\n'.join(lowerCamelCase_ )
Path(lowerCamelCase_ ).open('w' ).writelines(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = "patrickvonplaten/t5-tiny-random"
SCREAMING_SNAKE_CASE : List[Any] = "sshleifer/bart-tiny-random"
SCREAMING_SNAKE_CASE : int = "sshleifer/tiny-mbart"
SCREAMING_SNAKE_CASE : Optional[int] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class _lowerCamelCase( _a ):
def UpperCamelCase ( self, lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : int = Path(self.get_auto_remove_tmp_dir()) / 'utest_input.source'
_lowercase : str = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
_lowercase : str = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
_dump_articles(lowerCamelCase, lowerCamelCase)
_lowercase : List[str] = str(Path(self.get_auto_remove_tmp_dir()) / 'scores.json')
_lowercase : Optional[int] = 'translation_en_to_de' if model == T5_TINY else 'summarization'
_lowercase : Any = F'''
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
'''.split()
with patch.object(lowerCamelCase, 'argv', lowerCamelCase):
run_generate()
assert Path(lowerCamelCase).exists()
# os.remove(Path(output_file_name))
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
self.run_eval_tester(lowerCamelCase)
@parameterized.expand([BART_TINY, MBART_TINY])
@slow
def UpperCamelCase ( self, lowerCamelCase) -> int:
"""simple docstring"""
self.run_eval_tester(lowerCamelCase)
@parameterized.expand([T5_TINY, MBART_TINY])
@slow
def UpperCamelCase ( self, lowerCamelCase) -> List[str]:
"""simple docstring"""
_lowercase : str = Path(self.get_auto_remove_tmp_dir()) / 'utest_input.source'
_lowercase : Any = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
_lowercase : List[str] = {
'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
'de': [
'Maschinelles Lernen ist großartig, oder?',
'Ich esse gerne Bananen',
'Morgen ist wieder ein toller Tag!',
],
}
_lowercase : Optional[Any] = Path(self.get_auto_remove_tmp_dir())
_lowercase : Optional[Any] = str(tmp_dir / 'scores.json')
_lowercase : str = str(tmp_dir / 'val.target')
_dump_articles(lowerCamelCase, text['en'])
_dump_articles(lowerCamelCase, text['de'])
_lowercase : Tuple = 'translation_en_to_de' if model == T5_TINY else 'summarization'
_lowercase : Tuple = F'''
run_eval_search.py
{model}
{str(lowerCamelCase)}
{str(lowerCamelCase)}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
'''.split()
testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'])
with patch.object(lowerCamelCase, 'argv', lowerCamelCase):
with CaptureStdout() as cs:
run_search()
_lowercase : Dict = [' num_beams | length_penalty', model, 'Best score args']
_lowercase : Optional[Any] = ['Info']
if "translation" in task:
expected_strings.append('bleu')
else:
expected_strings.extend(lowerCamelCase)
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(lowerCamelCase).exists()
os.remove(Path(lowerCamelCase))
| 354
| 0
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
SCREAMING_SNAKE_CASE__ = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def A ( __UpperCamelCase=None ) -> Union[str, Any]:
if subparsers is not None:
A__ = subparsers.add_parser('tpu-config' , description=_description )
else:
A__ = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
A__ = parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
'--config_file' , type=__UpperCamelCase , default=__UpperCamelCase , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=__UpperCamelCase , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=__UpperCamelCase , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
A__ = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=__UpperCamelCase , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
parser.set_defaults(func=__UpperCamelCase )
return parser
def A ( __UpperCamelCase ) -> Optional[Any]:
A__ = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(__UpperCamelCase ):
A__ = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
A__ = defaults.command_file
if not args.command and defaults.commands is not None:
A__ = defaults.commands
if not args.tpu_name:
A__ = defaults.tpu_name
if not args.tpu_zone:
A__ = defaults.tpu_zone
if args.accelerate_version == "dev":
A__ = 'git+https://github.com/huggingface/accelerate.git'
elif args.accelerate_version == "latest":
A__ = 'accelerate -U'
elif isinstance(parse(args.accelerate_version ) , __UpperCamelCase ):
A__ = f'''accelerate=={args.accelerate_version}'''
if not args.command_file and not args.command:
raise ValueError('You must specify either a command file or a command to run on the pod.' )
if args.command_file:
with open(args.command_file , 'r' ) as f:
A__ = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , __UpperCamelCase ):
A__ = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
A__ = ['cd /usr/share']
if args.install_accelerate:
new_cmd += [f'''pip install {args.accelerate_version}''']
new_cmd += args.command
A__ = '; '.join(__UpperCamelCase )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
A__ = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f'''Running {" ".join(__UpperCamelCase )}''' )
return
subprocess.run(__UpperCamelCase )
print('Successfully setup pod.' )
def A ( ) -> Optional[Any]:
A__ = tpu_command_parser()
A__ = parser.parse_args()
tpu_command_launcher(__UpperCamelCase )
| 9
|
'''simple docstring'''
import math
def res(x: int, y: int) -> float:
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
    prompt = '''Enter the base and the power separated by a comma: '''
    xa, ya = map(int, input(prompt).split(''','''))
    xb, yb = map(int, input(prompt).split(''','''))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    resa = res(xa, ya)
    resb = res(xb, yb)
    # We check for the largest number
    if resa > resb:
        print('''Largest number is''', xa, '''^''', ya)
    elif resb > resa:
        print('''Largest number is''', xb, '''^''', yb)
    else:
        print('''Both are equal''')
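    # Worked example, added for clarity: comparing 2^10 against 3^7 without computing the
    # powers directly. res(2, 10) = 10 * log10(2) ~= 3.0103 and res(3, 7) = 7 * log10(3) ~= 3.3399,
    # so 3^7 (= 2187) is larger than 2^10 (= 1024).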
| 508
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A : Dict = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : int = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
_A : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 189
|
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
_A : str = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
    args = parser.parse_args()
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')
    imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 189
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class IBertConfig(PretrainedConfig):
    model_type = 'ibert'
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , quant_mode=False , force_dequant="none" , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
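# Minimal usage sketch (illustrative; assumes the I-BERT model classes from transformers are available):
#   config = IBertConfig(quant_mode=True)                              # integer-only quantization mode
#   onnx_config = IBertOnnxConfig(config, task="sequence-classification")
#   print(onnx_config.inputs)                                          # dynamic input axes for ONNX export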
| 510
|
"""simple docstring"""
def is_arithmetic_series(series: list) -> bool:
    '''Return True if the input list forms an arithmetic series.'''
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    '''Return the arithmetic mean of the input list.'''
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
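# Quick example (added for illustration): [2, 4, 6] is arithmetic with common difference 2 and mean 4.0,
# while [2, 4, 7] is not arithmetic because 7 - 4 != 4 - 2.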
if __name__ == "__main__":
import doctest
doctest.testmod()
| 510
| 1
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class snake_case__ ( unittest.TestCase ):
def A_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A_ ( self : Optional[Any] ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : List[str] = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return model
@property
def A_ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Tuple = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , cross_attention_dim=10 , )
return model
@property
def A_ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Optional[Any] = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , )
__snake_case : Tuple = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return vqvae, unet
@slow
def A_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
__snake_case : List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__snake_case : Optional[int] = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
__snake_case : int = DDPMScheduler()
__snake_case : str = AudioDiffusionPipeline(vqvae=_UpperCamelCase , unet=self.dummy_unet , mel=_UpperCamelCase , scheduler=_UpperCamelCase )
__snake_case : Optional[int] = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
__snake_case : Any = torch.Generator(device=_UpperCamelCase ).manual_seed(42 )
__snake_case : Union[str, Any] = pipe(generator=_UpperCamelCase , steps=4 )
__snake_case : List[str] = output.audios[0]
__snake_case : Tuple = output.images[0]
__snake_case : Optional[int] = torch.Generator(device=_UpperCamelCase ).manual_seed(42 )
__snake_case : List[str] = pipe(generator=_UpperCamelCase , steps=4 , return_dict=_UpperCamelCase )
__snake_case : Optional[int] = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
__snake_case : Any = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
__snake_case : str = np.frombuffer(image_from_tuple.tobytes() , dtype='uint8' )[:10]
__snake_case : int = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
__snake_case : List[Any] = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
__snake_case : str = DDIMScheduler()
__snake_case : Any = self.dummy_vqvae_and_unet
__snake_case : Optional[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=_UpperCamelCase , scheduler=_UpperCamelCase )
__snake_case : Optional[Any] = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
np.random.seed(0 )
__snake_case : List[str] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
__snake_case : Optional[int] = torch.Generator(device=_UpperCamelCase ).manual_seed(42 )
__snake_case : List[str] = pipe(raw_audio=_UpperCamelCase , generator=_UpperCamelCase , start_step=5 , steps=10 )
__snake_case : List[Any] = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
__snake_case : int = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
__snake_case : Optional[Any] = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
__snake_case : Any = self.dummy_unet_condition
__snake_case : int = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=_UpperCamelCase , mel=_UpperCamelCase , scheduler=_UpperCamelCase )
__snake_case : Optional[int] = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
np.random.seed(0 )
__snake_case : Dict = torch.rand((1, 1, 10) )
__snake_case : List[Any] = pipe(generator=_UpperCamelCase , encoding=_UpperCamelCase )
__snake_case : Tuple = output.images[0]
__snake_case : int = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
__snake_case : List[str] = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
def A_ ( self : str ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
__snake_case : Dict = torch_device
__snake_case : Optional[Any] = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256' )
__snake_case : int = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
__snake_case : Union[str, Any] = torch.Generator(device=_UpperCamelCase ).manual_seed(42 )
__snake_case : Union[str, Any] = pipe(generator=_UpperCamelCase )
__snake_case : Tuple = output.audios[0]
__snake_case : List[str] = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
__snake_case : Dict = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
__snake_case : int = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 715
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = '''openai/whisper-base'''
    description = (
        '''This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '''
        '''transcribed text.'''
    )
    name = '''transcriber'''
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ['''audio''']
    outputs = ['''text''']

    def encode(self, audio):
        '''Prepare the raw audio as Whisper input features.'''
        return self.pre_processor(audio , return_tensors='pt' ).input_features

    def forward(self, inputs):
        '''Run generation on the encoded features.'''
        return self.model.generate(inputs=inputs )

    def decode(self, outputs):
        '''Decode the generated token ids back to text.'''
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0]
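# Usage sketch (illustrative; assumes the transformers tools runtime and a local audio file):
#   tool = SpeechToTextTool()
#   tool.setup()                          # loads the Whisper processor and model
#   text = tool("path/to/recording.wav")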
| 124
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
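# Note (added for clarity): the _LazyModule wrapper defers the torch-dependent imports declared above until an
# attribute is first accessed, so e.g. `from transformers.models.encodec import EncodecModel` only triggers the
# heavy modeling import at that point.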
| 598
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def a_ ( __magic_name__ ) -> Any:
"""simple docstring"""
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def a_ ( ) -> List[str]:
"""simple docstring"""
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def a_ ( ) -> Union[str, Any]:
"""simple docstring"""
snake_case : Optional[Any] = '''mock-s3-bucket'''
snake_case : Optional[Any] = F"s3://{mock_bucket}"
snake_case : Tuple = extract_path_from_uri(__magic_name__ )
assert dataset_path.startswith('''s3://''' ) is False
snake_case : List[Any] = '''./local/path'''
snake_case : Any = extract_path_from_uri(__magic_name__ )
assert dataset_path == new_dataset_path
def a_ ( __magic_name__ ) -> List[str]:
"""simple docstring"""
snake_case : Any = is_remote_filesystem(__magic_name__ )
assert is_remote is True
snake_case : Dict = fsspec.filesystem('''file''' )
snake_case : List[Any] = is_remote_filesystem(__magic_name__ )
assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' , __magic_name__ )
def a_ ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Any:
"""simple docstring"""
snake_case : Tuple = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
snake_case : Optional[int] = input_paths[compression_fs_class.protocol]
if input_path is None:
snake_case : Any = F"for '{compression_fs_class.protocol}' compression protocol, "
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__magic_name__ )
snake_case : Optional[Any] = fsspec.filesystem(compression_fs_class.protocol , fo=__magic_name__ )
assert isinstance(__magic_name__ , __magic_name__ )
snake_case : List[Any] = os.path.basename(__magic_name__ )
snake_case : Optional[int] = expected_filename[: expected_filename.rindex('''.''' )]
assert fs.glob('''*''' ) == [expected_filename]
with fs.open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as f, open(__magic_name__ , encoding='''utf-8''' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def a_ ( __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]:
"""simple docstring"""
snake_case : Tuple = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
snake_case : Optional[int] = compressed_file_paths[protocol]
snake_case : str = '''dataset.jsonl'''
snake_case : List[Any] = F"{protocol}://{member_file_path}::{compressed_file_path}"
snake_case , *snake_case : str = fsspec.get_fs_token_paths(__magic_name__ )
assert fs.isfile(__magic_name__ )
assert not fs.isfile('''non_existing_''' + member_file_path )
@pytest.mark.integration
def a_ ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[Any]:
"""simple docstring"""
snake_case : List[Any] = hf_api.dataset_info(__magic_name__ , token=__magic_name__ )
snake_case : List[str] = HfFileSystem(repo_info=__magic_name__ , token=__magic_name__ )
assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]
assert hffs.isdir('''data''' )
assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )
with open(__magic_name__ ) as f:
assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read()
def a_ ( ) -> List[Any]:
"""simple docstring"""
snake_case : List[str] = '''bz2'''
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(__magic_name__ , __magic_name__ , clobber=__magic_name__ )
with pytest.warns(__magic_name__ ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(__magic_name__ ) == 1
assert (
str(warning_info[0].message )
== F"A filesystem protocol was already set for {protocol} and will be overwritten."
)
| 598
| 1
|
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : Any = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class lowerCamelCase_ ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase = "owlvit_text_model"
def __init__( self , snake_case_=4_9_4_0_8 , snake_case_=5_1_2 , snake_case_=2_0_4_8 , snake_case_=1_2 , snake_case_=8 , snake_case_=1_6 , snake_case_="quick_gelu" , snake_case_=1e-5 , snake_case_=0.0 , snake_case_=0.0_2 , snake_case_=1.0 , snake_case_=0 , snake_case_=4_9_4_0_6 , snake_case_=4_9_4_0_7 , **snake_case_ , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = intermediate_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = max_position_embeddings
__lowercase = hidden_act
__lowercase = layer_norm_eps
__lowercase = attention_dropout
__lowercase = initializer_range
__lowercase = initializer_factor
@classmethod
def A ( cls , snake_case_ , **snake_case_ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(snake_case_ )
__lowercase , __lowercase = cls.get_config_dict(snake_case_ , **snake_case_ )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
__lowercase = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(snake_case_ , **snake_case_ )
class lowerCamelCase_ ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase = "owlvit_vision_model"
def __init__( self , snake_case_=7_6_8 , snake_case_=3_0_7_2 , snake_case_=1_2 , snake_case_=1_2 , snake_case_=3 , snake_case_=7_6_8 , snake_case_=3_2 , snake_case_="quick_gelu" , snake_case_=1e-5 , snake_case_=0.0 , snake_case_=0.0_2 , snake_case_=1.0 , **snake_case_ , ) -> Any:
'''simple docstring'''
super().__init__(**snake_case_ )
__lowercase = hidden_size
__lowercase = intermediate_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = num_channels
__lowercase = image_size
__lowercase = patch_size
__lowercase = hidden_act
__lowercase = layer_norm_eps
__lowercase = attention_dropout
__lowercase = initializer_range
__lowercase = initializer_factor
@classmethod
def A ( cls , snake_case_ , **snake_case_ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(snake_case_ )
__lowercase , __lowercase = cls.get_config_dict(snake_case_ , **snake_case_ )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
__lowercase = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(snake_case_ , **snake_case_ )
class lowerCamelCase_ ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase = "owlvit"
__UpperCAmelCase = True
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=5_1_2 , snake_case_=2.6_5_9_2 , snake_case_=True , **snake_case_ , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**snake_case_ )
if text_config is None:
__lowercase = {}
logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' )
if vision_config is None:
__lowercase = {}
logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''' )
__lowercase = OwlViTTextConfig(**snake_case_ )
__lowercase = OwlViTVisionConfig(**snake_case_ )
__lowercase = projection_dim
__lowercase = logit_scale_init_value
__lowercase = return_dict
__lowercase = 1.0
@classmethod
def A ( cls , snake_case_ , **snake_case_ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(snake_case_ )
__lowercase , __lowercase = cls.get_config_dict(snake_case_ , **snake_case_ )
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(snake_case_ , **snake_case_ )
@classmethod
def A ( cls , snake_case_ , snake_case_ , **snake_case_ ) -> int:
'''simple docstring'''
__lowercase = {}
__lowercase = text_config
__lowercase = vision_config
return cls.from_dict(snake_case_ , **snake_case_ )
def A ( self ) -> Any:
'''simple docstring'''
__lowercase = copy.deepcopy(self.__dict__ )
__lowercase = self.text_config.to_dict()
__lowercase = self.vision_config.to_dict()
__lowercase = self.__class__.model_type
return output
class lowerCamelCase_ ( lowerCAmelCase__ ):
'''simple docstring'''
@property
def A ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
] )
@property
def A ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''logits_per_image''', {0: '''batch'''}),
('''logits_per_text''', {0: '''batch'''}),
('''text_embeds''', {0: '''batch'''}),
('''image_embeds''', {0: '''batch'''}),
] )
@property
def A ( self ) -> float:
'''simple docstring'''
return 1e-4
def A ( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = None , ) -> Mapping[str, Any]:
'''simple docstring'''
__lowercase = super().generate_dummy_inputs(
processor.tokenizer , batch_size=snake_case_ , seq_length=snake_case_ , framework=snake_case_ )
__lowercase = super().generate_dummy_inputs(
processor.image_processor , batch_size=snake_case_ , framework=snake_case_ )
return {**text_input_dict, **image_input_dict}
@property
def A ( self ) -> int:
'''simple docstring'''
return 1_4
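# Usage sketch (illustrative; assumes the standard transformers OwlViT config API corresponding to the classes above):
#   text_cfg = OwlViTTextConfig(vocab_size=49408)
#   vision_cfg = OwlViTVisionConfig(image_size=768)
#   cfg = OwlViTConfig.from_text_vision_configs(text_cfg.to_dict(), vision_cfg.to_dict())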
| 527
|
from itertools import count
def solution(min_block_length: int = 50) -> int:
    '''Return the least row length whose fill count first exceeds one million.'''
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_00_00_00:
            break
    return n
if __name__ == "__main__":
    print(f'''{solution() = }''')
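# Illustrative check (taken from the Project Euler 114/115 statements, not computed here): with
# min_block_length=3 a row of length 7 can be filled in 17 ways, and solution(3) returns the first
# row length whose fill count exceeds one million.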
| 527
| 1
|
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the digits of `number`."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Return the sum of all numbers that equal the sum of the fifth powers of their digits."""
    return sum(
        number
        for number in range(1000 , 100_0000)
        if number == digits_fifth_powers_sum(number) )
if __name__ == "__main__":
print(solution())
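# Illustrative check (added for clarity): 4150 qualifies, since
# 4**5 + 1**5 + 5**5 + 0**5 == 1024 + 1 + 3125 + 0 == 4150.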
| 225
|
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    """Declare a dataclass field whose (mutable) default is produced by a factory."""
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class SCREAMING_SNAKE_CASE :
__lowerCamelCase : List[str] =list_field(
default=[] , metadata={
'help': (
'Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'
' of all available models'
)
} , )
__lowerCamelCase : List[int] =list_field(
default=[8] , metadata={'help': 'List of batch sizes for which memory and time performance will be evaluated'} )
__lowerCamelCase : List[int] =list_field(
default=[8, 32, 128, 512] , metadata={'help': 'List of sequence lengths for which memory and time performance will be evaluated'} , )
__lowerCamelCase : bool =field(
default=lowerCamelCase__ , metadata={'help': 'Whether to benchmark inference of model. Inference can be disabled via --no-inference.'} , )
__lowerCamelCase : bool =field(
default=lowerCamelCase__ , metadata={'help': 'Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'} , )
__lowerCamelCase : bool =field(
default=lowerCamelCase__ , metadata={'help': 'Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'} )
__lowerCamelCase : bool =field(default=lowerCamelCase__ , metadata={'help': 'Use FP16 to accelerate inference.'} )
__lowerCamelCase : bool =field(default=lowerCamelCase__ , metadata={'help': 'Benchmark training of model'} )
__lowerCamelCase : bool =field(default=lowerCamelCase__ , metadata={'help': 'Verbose memory tracing'} )
__lowerCamelCase : bool =field(
default=lowerCamelCase__ , metadata={'help': 'Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'} , )
__lowerCamelCase : bool =field(
default=lowerCamelCase__ , metadata={
'help': 'Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'
} , )
__lowerCamelCase : bool =field(default=lowerCamelCase__ , metadata={'help': 'Trace memory line by line'} )
__lowerCamelCase : bool =field(default=lowerCamelCase__ , metadata={'help': 'Save result to a CSV file'} )
__lowerCamelCase : bool =field(default=lowerCamelCase__ , metadata={'help': 'Save all print statements in a log file'} )
__lowerCamelCase : bool =field(default=lowerCamelCase__ , metadata={'help': 'Whether to print environment information'} )
__lowerCamelCase : bool =field(
default=lowerCamelCase__ , metadata={
'help': (
'Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'
' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'
' for debugging / testing and on TPU.'
)
} , )
__lowerCamelCase : str =field(
default=F'''inference_time_{round(time() )}.csv''' , metadata={'help': 'CSV filename used if saving time results to csv.'} , )
__lowerCamelCase : str =field(
default=F'''inference_memory_{round(time() )}.csv''' , metadata={'help': 'CSV filename used if saving memory results to csv.'} , )
__lowerCamelCase : str =field(
default=F'''train_time_{round(time() )}.csv''' , metadata={'help': 'CSV filename used if saving time results to csv for training.'} , )
__lowerCamelCase : str =field(
default=F'''train_memory_{round(time() )}.csv''' , metadata={'help': 'CSV filename used if saving memory results to csv for training.'} , )
__lowerCamelCase : str =field(
default=F'''env_info_{round(time() )}.csv''' , metadata={'help': 'CSV filename used if saving environment information.'} , )
__lowerCamelCase : str =field(
default=F'''log_{round(time() )}.csv''' , metadata={'help': 'Log filename used if print statements are saved in log.'} , )
__lowerCamelCase : int =field(default=3 , metadata={'help': 'Times an experiment will be run.'} )
__lowerCamelCase : bool =field(
default=lowerCamelCase__ , metadata={
'help': (
'Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'
' model weights.'
)
} , )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
        warnings.warn(
            F"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            """ are deprecated in general and it is advised to use external Benchmarking libraries """
            """ to benchmark Transformer models.""" , FutureWarning , )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
if len(self.models ) <= 0:
raise ValueError(
"""Please make sure you provide at least one model name / model identifier, *e.g.* `--models"""
""" bert-base-cased` or `args.models = ['bert-base-cased'].""" )
return self.models
@property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("""Multiprocessing is currently not possible on TPU.""" )
return False
else:
return True
| 225
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_efficientnet""": [
"""EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientNetConfig""",
"""EfficientNetOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["""EfficientNetImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
"""EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientNetForImageClassification""",
"""EfficientNetModel""",
"""EfficientNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 707
|
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    '''Load a saved state dict, cast every tensor to fp16 and write it back to disk.'''
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
        state_dict[k] = v.half()
    if save_path is None: # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )
if __name__ == "__main__":
fire.Fire(convert)
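# Example invocation via fire (illustrative; the script filename is an assumption):
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin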
| 626
| 0
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = TFXLMRobertaModel.from_pretrained('jplu/tf-xlm-roberta-base' )
SCREAMING_SNAKE_CASE_ = {
'input_ids': tf.convert_to_tensor([[0, 2_646, 10_269, 83, 99_942, 2]] , dtype=tf.intaa ), # "My dog is cute"
'attention_mask': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )['last_hidden_state']
SCREAMING_SNAKE_CASE_ = tf.TensorShape((1, 6, 768) )
self.assertEqual(output.shape , _lowerCAmelCase )
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE_ = tf.convert_to_tensor(
[
[
[0.068_1762, 0.1089_4451, 0.0677_2504],
[-0.0642_3668, 0.0236_6615, 0.0432_9344],
[-0.0605_7295, 0.0997_4135, -0.0007_0584],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 31
|
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    """Return True if `n` reads the same forwards and backwards."""
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1_0_0_0_0_0_0) -> int:
    """Return the sum of all numbers below `n` that are palindromic in base 10 and base 2."""
    total = 0
    for i in range(1 , n):
        if is_palindrome(i) and is_palindrome(bin(i).split('b' )[1] ):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
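# Example from the Project Euler 36 statement: 585 = 0b1001001001 is palindromic in both base 10 and base 2,
# so it is one of the numbers summed by solution().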
| 246
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 705
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    """A single node of a circular singly linked list."""
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next: Node | None = None


class CircularLinkedList:
    """A circular singly linked list that tracks both head and tail."""
    def __init__(self) -> None:
        self.head: Node | None = None
        self.tail: Node | None = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0
def test_circular_linked_list() -> None:
    """
    Exercise the CircularLinkedList operations end to end.

    >>> test_circular_linked_list()
    """
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""
    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3
    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 626
| 0
|
def kth_permutation(k: int, n: int) -> list:
    """Return the k-th (0-indexed) lexicographic permutation of range(n)."""
    # Factorials from 1! to (n - 1)!
    factorials = [1]
    for i in range(2 , n):
        factorials.append(factorials[-1] * i )
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n) )
    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k , factorial )
        permutation.append(elements[number] )
        elements.remove(elements[number] )
    permutation.append(elements[0] )
    return permutation
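# Example (added for illustration): kth_permutation(0, 4) returns [0, 1, 2, 3], the identity ordering,
# while kth_permutation(23, 4) returns [3, 2, 1, 0], the last of the 4! = 24 permutations.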
if __name__ == "__main__":
import doctest
doctest.testmod()
| 413
|
'''simple docstring'''
def valid_coloring(neighbours: list[int] , colored_vertices: list[int] , color: int ):
    """Return True if no already-colored neighbour of the current vertex uses `color`."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def util_color(graph: list[list[int]] , max_colors: int , colored_vertices: list[int] , index: int ):
    # Base Case
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color(graph: list[list[int]] , max_colors: int ):
    """Return a valid coloring using at most `max_colors` colors, or [] if none exists."""
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices
    return []
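# Example (added for illustration): for the triangle graph
#   graph = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
# color(graph, 2) returns [] because two colors are not enough, while color(graph, 3) returns a valid
# assignment such as [0, 1, 2].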
| 447
| 0
|
'''simple docstring'''
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class a ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self : Optional[int] , *lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Optional[Any]=None , **lowerCamelCase__ : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
__lowercase = eval_examples
__lowercase = post_process_function
def UpperCAmelCase_ ( self : List[str] , lowerCamelCase__ : Optional[Dataset] = None , lowerCamelCase__ : Optional[int]=None , lowerCamelCase__ : Optional[List[str]] = None , lowerCamelCase__ : str = "eval" , **lowerCamelCase__ : Dict , ) -> Dict[str, float]:
"""simple docstring"""
__lowercase = gen_kwargs.copy()
__lowercase = (
gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length
)
__lowercase = (
gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams
)
__lowercase = gen_kwargs
__lowercase = self.eval_dataset if eval_dataset is None else eval_dataset
__lowercase = self.get_eval_dataloader(lowerCamelCase__ )
__lowercase = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__lowercase = self.compute_metrics
__lowercase = None
__lowercase = time.time()
__lowercase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__lowercase = eval_loop(
lowerCamelCase__ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCamelCase__ , metric_key_prefix=lowerCamelCase__ , )
finally:
__lowercase = compute_metrics
__lowercase = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
lowerCamelCase__ , lowerCamelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__lowercase = self.post_process_function(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
__lowercase = self.compute_metrics(lowerCamelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
__lowercase = metrics.pop(lowerCamelCase__ )
metrics.update(output.metrics )
else:
__lowercase = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowerCamelCase__ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__lowercase = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowerCamelCase__ )
return metrics
def UpperCAmelCase_ ( self : int , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[int]=None , lowerCamelCase__ : str = "test" , **lowerCamelCase__ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = gen_kwargs.copy()
__lowercase = self.get_test_dataloader(lowerCamelCase__ )
# Temporarily disable metric computation, we will do it in the loop here.
__lowercase = self.compute_metrics
__lowercase = None
__lowercase = time.time()
__lowercase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__lowercase = eval_loop(
lowerCamelCase__ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCamelCase__ , metric_key_prefix=lowerCamelCase__ , )
finally:
__lowercase = compute_metrics
__lowercase = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
lowerCamelCase__ , lowerCamelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
__lowercase = self.post_process_function(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , '''predict''' )
__lowercase = self.compute_metrics(lowerCamelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
__lowercase = metrics.pop(lowerCamelCase__ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowerCamelCase__ )
| 716
|
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    """Anything exposing a per-sample `process` method can be plotted below."""
    def process(self , sample: float ) -> float:
        return 0.0
def get_bounds(fft_results: np.ndarray , samplerate: int ) -> tuple[int | float, int | float]:
    '''Return sensible lower/upper dB bounds for the response plot.'''
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest
def show_frequency_response(filter_type: FilterType , samplerate: int ) -> None:
    '''Plot the frequency (gain) response of `filter_type`.'''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size) # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs ) )
    fft_db = 20 * np.log10(fft_out )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel('''Frequency (Hz)''' )
    plt.xscale('''log''' )
    # Display within reasonable bounds
    bounds = get_bounds(fft_db , samplerate )
    plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
    plt.ylabel('''Gain (dB)''' )
    plt.plot(fft_db )
    plt.show()
def show_phase_response(filter_type: FilterType , samplerate: int ) -> None:
    '''Plot the phase response of `filter_type`.'''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size) # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs ) )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel('''Frequency (Hz)''' )
    plt.xscale('''log''' )
    plt.ylim(-2 * pi , 2 * pi )
    plt.ylabel('''Phase shift (Radians)''' )
    plt.plot(np.unwrap(fft_out , -2 * pi ) )
    plt.show()
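# Usage sketch (illustrative; assumes an IIR/FIR filter class exposing `process(sample: float) -> float`,
# such as an IIRFilter from an audio_filters package - the import path is an assumption):
#   show_frequency_response(IIRFilter(4), 48000)
#   show_phase_response(IIRFilter(4), 48000)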
| 362
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 152
|
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
__a: Tuple = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
__a: Dict = """https://storage.googleapis.com/cvdf-datasets/mnist/"""
def __UpperCamelCase ( UpperCAmelCase ):
lowercase__ : Optional[int] = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=UpperCAmelCase )[0]
@deprecated(UpperCAmelCase , '''Please use tf.data to implement this functionality.''' )
def __UpperCamelCase ( UpperCAmelCase ):
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=UpperCAmelCase ) as bytestream:
lowercase__ : int = _readaa(UpperCAmelCase )
if magic != 2051:
raise ValueError(
'''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
lowercase__ : Optional[Any] = _readaa(UpperCAmelCase )
lowercase__ : List[Any] = _readaa(UpperCAmelCase )
lowercase__ : Any = _readaa(UpperCAmelCase )
lowercase__ : Tuple = bytestream.read(rows * cols * num_images )
lowercase__ : Union[str, Any] = numpy.frombuffer(UpperCAmelCase , dtype=numpy.uinta )
lowercase__ : int = data.reshape(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , 1 )
return data
@deprecated(UpperCAmelCase , '''Please use tf.one_hot on tensors.''' )
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
lowercase__ : int = labels_dense.shape[0]
lowercase__ : List[Any] = numpy.arange(UpperCAmelCase ) * num_classes
lowercase__ : str = numpy.zeros((num_labels, num_classes) )
lowercase__ : int = 1
return labels_one_hot
@deprecated(UpperCAmelCase , '''Please use tf.data to implement this functionality.''' )
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=10 ):
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=UpperCAmelCase ) as bytestream:
lowercase__ : Tuple = _readaa(UpperCAmelCase )
if magic != 2049:
raise ValueError(
'''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
lowercase__ : int = _readaa(UpperCAmelCase )
lowercase__ : Union[str, Any] = bytestream.read(UpperCAmelCase )
lowercase__ : str = numpy.frombuffer(UpperCAmelCase , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(UpperCAmelCase , UpperCAmelCase )
return labels
class UpperCAmelCase :
'''simple docstring'''
@deprecated(
__lowerCAmelCase , '''Please use alternatives such as official/mnist/_DataSet.py'''
''' from tensorflow/models.''' , )
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=dtypes.floataa , __lowerCAmelCase=True , __lowerCAmelCase=None , ) -> Any:
lowercase__ , lowercase__ : str = random_seed.get_seed(__lowerCAmelCase )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
lowercase__ : List[str] = dtypes.as_dtype(__lowerCAmelCase ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype )
if fake_data:
lowercase__ : str = 10000
lowercase__ : Any = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), F"""images.shape: {images.shape} labels.shape: {labels.shape}"""
lowercase__ : Any = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
lowercase__ : Optional[int] = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
lowercase__ : List[Any] = images.astype(numpy.floataa )
lowercase__ : Any = numpy.multiply(__lowerCAmelCase , 1.0 / 2_5_5.0 )
lowercase__ : int = images
lowercase__ : int = labels
lowercase__ : Any = 0
lowercase__ : int = 0
@property
def _lowerCAmelCase( self ) -> str:
return self._images
@property
def _lowerCAmelCase( self ) -> Tuple:
return self._labels
@property
def _lowerCAmelCase( self ) -> Tuple:
return self._num_examples
@property
def _lowerCAmelCase( self ) -> Tuple:
return self._epochs_completed
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=True ) -> List[str]:
if fake_data:
lowercase__ : Optional[int] = [1] * 784
lowercase__ : Tuple = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(__lowerCAmelCase )],
[fake_label for _ in range(__lowerCAmelCase )],
)
lowercase__ : Optional[int] = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
lowercase__ : Any = numpy.arange(self._num_examples )
numpy.random.shuffle(__lowerCAmelCase )
lowercase__ : Optional[Any] = self.images[perma]
lowercase__ : str = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
lowercase__ : Optional[int] = self._num_examples - start
lowercase__ : Any = self._images[start : self._num_examples]
lowercase__ : List[str] = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
lowercase__ : Tuple = numpy.arange(self._num_examples )
numpy.random.shuffle(__lowerCAmelCase )
lowercase__ : Optional[int] = self.images[perm]
lowercase__ : List[Any] = self.labels[perm]
# Start next epoch
lowercase__ : List[Any] = 0
lowercase__ : Optional[Any] = batch_size - rest_num_examples
lowercase__ : str = self._index_in_epoch
lowercase__ : List[str] = self._images[start:end]
lowercase__ : str = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
lowercase__ : Optional[Any] = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(UpperCAmelCase , '''Please write your own downloading logic.''' )
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
if not gfile.Exists(UpperCAmelCase ):
gfile.MakeDirs(UpperCAmelCase )
lowercase__ : List[str] = os.path.join(UpperCAmelCase , UpperCAmelCase )
if not gfile.Exists(UpperCAmelCase ):
urllib.request.urlretrieve(UpperCAmelCase , UpperCAmelCase ) # noqa: S310
with gfile.GFile(UpperCAmelCase ) as f:
lowercase__ : Tuple = f.size()
print('''Successfully downloaded''' , UpperCAmelCase , UpperCAmelCase , '''bytes.''' )
return filepath
@deprecated(
UpperCAmelCase , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=dtypes.floataa , UpperCAmelCase=True , UpperCAmelCase=5000 , UpperCAmelCase=None , UpperCAmelCase=DEFAULT_SOURCE_URL , ):
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=UpperCAmelCase , one_hot=UpperCAmelCase , dtype=UpperCAmelCase , seed=UpperCAmelCase )
lowercase__ : Any = fake()
lowercase__ : Optional[int] = fake()
lowercase__ : Optional[int] = fake()
return _Datasets(train=UpperCAmelCase , validation=UpperCAmelCase , test=UpperCAmelCase )
if not source_url: # empty string check
lowercase__ : Tuple = DEFAULT_SOURCE_URL
lowercase__ : Tuple = '''train-images-idx3-ubyte.gz'''
lowercase__ : List[str] = '''train-labels-idx1-ubyte.gz'''
lowercase__ : Optional[int] = '''t10k-images-idx3-ubyte.gz'''
lowercase__ : str = '''t10k-labels-idx1-ubyte.gz'''
lowercase__ : Optional[Any] = _maybe_download(
UpperCAmelCase , UpperCAmelCase , source_url + train_images_file )
with gfile.Open(UpperCAmelCase , '''rb''' ) as f:
lowercase__ : Optional[int] = _extract_images(UpperCAmelCase )
lowercase__ : Optional[Any] = _maybe_download(
UpperCAmelCase , UpperCAmelCase , source_url + train_labels_file )
with gfile.Open(UpperCAmelCase , '''rb''' ) as f:
lowercase__ : Union[str, Any] = _extract_labels(UpperCAmelCase , one_hot=UpperCAmelCase )
lowercase__ : Any = _maybe_download(
UpperCAmelCase , UpperCAmelCase , source_url + test_images_file )
with gfile.Open(UpperCAmelCase , '''rb''' ) as f:
lowercase__ : Tuple = _extract_images(UpperCAmelCase )
lowercase__ : Optional[int] = _maybe_download(
UpperCAmelCase , UpperCAmelCase , source_url + test_labels_file )
with gfile.Open(UpperCAmelCase , '''rb''' ) as f:
lowercase__ : Tuple = _extract_labels(UpperCAmelCase , one_hot=UpperCAmelCase )
if not 0 <= validation_size <= len(UpperCAmelCase ):
lowercase__ : Optional[int] = (
'''Validation size should be between 0 and '''
F"""{len(UpperCAmelCase )}. Received: {validation_size}."""
)
raise ValueError(UpperCAmelCase )
lowercase__ : Optional[int] = train_images[:validation_size]
lowercase__ : List[str] = train_labels[:validation_size]
lowercase__ : Tuple = train_images[validation_size:]
lowercase__ : Optional[int] = train_labels[validation_size:]
lowercase__ : List[str] = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}
lowercase__ : str = _DataSet(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
lowercase__ : int = _DataSet(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
lowercase__ : List[Any] = _DataSet(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
return _Datasets(train=UpperCAmelCase , validation=UpperCAmelCase , test=UpperCAmelCase )
| 152
| 1
|
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
lowercase__ = logging.getLogger(__name__)
lowercase__ = "Hello world! cécé herlolip"
lowercase__ = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def __UpperCamelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : List[str] ) -> Dict:
'''simple docstring'''
_a = BertAbsConfig(
temp_dir="." , finetune_bert=__lowerCamelCase , large=__lowerCamelCase , share_emb=__lowerCamelCase , use_bert_emb=__lowerCamelCase , encoder="bert" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
    _a = torch.load(__lowerCamelCase , lambda storage , loc : storage )
_a = AbsSummarizer(__lowerCamelCase , torch.device("cpu" ) , __lowerCamelCase )
original.eval()
_a = BertAbsSummarizer(__lowerCamelCase , torch.device("cpu" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical" )
_a = BertTokenizer.from_pretrained("bert-base-uncased" )
# prepare the model inputs
_a = tokenizer.encode("This is sample éàalj'-." )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(__lowerCamelCase )) )
_a = torch.tensor(__lowerCamelCase ).unsqueeze(0 )
_a = tokenizer.encode("This is sample 3 éàalj'-." )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(__lowerCamelCase )) )
_a = torch.tensor(__lowerCamelCase ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
_a = encoder_input_ids
_a = decoder_input_ids
_a = _a = None
_a = None
_a = _a = None
_a = _a = None
_a = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
_a = original(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )[0]
_a = original.generator(__lowerCamelCase )
_a = new_model(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )[0]
_a = new_model.generator(__lowerCamelCase )
_a = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(__lowerCamelCase ) )
_a = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(__lowerCamelCase ) )
_a = torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-3 )
if are_identical:
logging.info("all weights are equal up to 1e-3" )
else:
raise ValueError("the weights are different. The new model is likely different from the original one." )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary" )
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
lowercase__ = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 276
|
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def __UpperCamelCase ( __lowerCamelCase : BertModel , __lowerCamelCase : str , __lowerCamelCase : str ) -> List[str]:
'''simple docstring'''
_a = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
_a = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(__lowerCamelCase ):
os.makedirs(__lowerCamelCase )
_a = model.state_dict()
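    # Translate a PyTorch parameter name into the corresponding TensorFlow variable name
    # by applying the (pattern, replacement) pairs defined above.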
def to_tf_var_name(__lowerCamelCase : str ):
for patt, repl in iter(__lowerCamelCase ):
_a = name.replace(__lowerCamelCase , __lowerCamelCase )
return F"bert/{name}"
def create_tf_var(__lowerCamelCase : np.ndarray , __lowerCamelCase : str , __lowerCamelCase : tf.Session ):
_a = tf.dtypes.as_dtype(tensor.dtype )
_a = tf.get_variable(dtype=__lowerCamelCase , shape=tensor.shape , name=__lowerCamelCase , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(__lowerCamelCase )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
_a = to_tf_var_name(__lowerCamelCase )
_a = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
_a = torch_tensor.T
_a = create_tf_var(tensor=__lowerCamelCase , name=__lowerCamelCase , session=__lowerCamelCase )
tf.keras.backend.set_value(__lowerCamelCase , __lowerCamelCase )
_a = session.run(__lowerCamelCase )
print(F"Successfully created {tf_name}: {np.allclose(__lowerCamelCase , __lowerCamelCase )}" )
_a = tf.train.Saver(tf.trainable_variables() )
saver.save(__lowerCamelCase , os.path.join(__lowerCamelCase , model_name.replace("-" , "_" ) + ".ckpt" ) )
def __UpperCamelCase ( __lowerCamelCase : str=None ) -> Optional[int]:
'''simple docstring'''
_a = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=__lowerCamelCase , required=__lowerCamelCase , help="model name e.g. bert-base-uncased" )
parser.add_argument(
"--cache_dir" , type=__lowerCamelCase , default=__lowerCamelCase , required=__lowerCamelCase , help="Directory containing pytorch model" )
parser.add_argument("--pytorch_model_path" , type=__lowerCamelCase , required=__lowerCamelCase , help="/path/to/<pytorch-model-name>.bin" )
parser.add_argument("--tf_cache_dir" , type=__lowerCamelCase , required=__lowerCamelCase , help="Directory in which to save tensorflow model" )
_a = parser.parse_args(__lowerCamelCase )
_a = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=__lowerCamelCase , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 276
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : Optional[Any] = logging.get_logger(__name__)
_a : str = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class __A (__magic_name__ ):
snake_case :Optional[int] = "xlm-roberta"
def __init__( self , UpperCamelCase_=3_05_22 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_="absolute" , UpperCamelCase_=True , UpperCamelCase_=None , **UpperCamelCase_ , ):
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__UpperCAmelCase : Dict = vocab_size
__UpperCAmelCase : List[str] = hidden_size
__UpperCAmelCase : int = num_hidden_layers
__UpperCAmelCase : List[str] = num_attention_heads
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : Optional[int] = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_dropout_prob
__UpperCAmelCase : List[Any] = attention_probs_dropout_prob
__UpperCAmelCase : str = max_position_embeddings
__UpperCAmelCase : Dict = type_vocab_size
__UpperCAmelCase : str = initializer_range
__UpperCAmelCase : Dict = layer_norm_eps
__UpperCAmelCase : Dict = position_embedding_type
__UpperCAmelCase : str = use_cache
__UpperCAmelCase : int = classifier_dropout
class __A (__magic_name__ ):
@property
def _snake_case ( self ):
if self.task == "multiple-choice":
__UpperCAmelCase : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
__UpperCAmelCase : List[str] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 168
|
'''simple docstring'''
from __future__ import annotations
def _lowercase ( lowerCamelCase__ ) -> float:
"""simple docstring"""
__UpperCAmelCase : Any = 0.00
__UpperCAmelCase : Union[str, Any] = 0
for resistor in resistors:
if resistor <= 0:
__UpperCAmelCase : Tuple = f"""Resistor at index {index} has a negative or zero value!"""
raise ValueError(lowerCamelCase__ )
first_sum += 1 / float(lowerCamelCase__ )
index += 1
return 1 / first_sum
def _lowercase ( lowerCamelCase__ ) -> float:
"""simple docstring"""
__UpperCAmelCase : int = 0.00
__UpperCAmelCase : List[str] = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
__UpperCAmelCase : Tuple = f"""Resistor at index {index} has a negative value!"""
raise ValueError(lowerCamelCase__ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
| 168
| 1
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowerCamelCase : str = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase ) -> Tuple[int, int]:
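    # Round `val` to the nearest multiple of `multiple`, falling back to floor/ceil so the result
    # stays within the optional max_val / min_val bounds.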
def constraint_to_multiple_of(lowercase ,lowercase ,lowercase=0 ,lowercase=None ):
snake_case : Union[str, Any] = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
snake_case : Any = math.floor(val / multiple ) * multiple
if x < min_val:
snake_case : Optional[int] = math.ceil(val / multiple ) * multiple
return x
snake_case : Optional[Any] = (output_size, output_size) if isinstance(lowercase ,lowercase ) else output_size
snake_case , snake_case : Dict = get_image_size(lowercase )
snake_case , snake_case : Union[str, Any] = output_size
# determine new height and width
snake_case : int = output_height / input_height
snake_case : Any = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
snake_case : int = scale_width
else:
# fit height
snake_case : Optional[int] = scale_height
snake_case : Optional[int] = constraint_to_multiple_of(scale_height * input_height ,multiple=lowercase )
snake_case : Any = constraint_to_multiple_of(scale_width * input_width ,multiple=lowercase )
return (new_height, new_width)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""pixel_values"""]
def __init__( self , A = True , A = None , A = PILImageResampling.BILINEAR , A = False , A = 1 , A = True , A = 1 / 2_5_5 , A = True , A = None , A = None , **A , ) -> None:
super().__init__(**A )
snake_case : Optional[Any] = size if size is not None else {"""height""": 3_8_4, """width""": 3_8_4}
snake_case : Optional[Any] = get_size_dict(A )
snake_case : str = do_resize
snake_case : Any = size
snake_case : str = keep_aspect_ratio
snake_case : Any = ensure_multiple_of
snake_case : Union[str, Any] = resample
snake_case : List[str] = do_rescale
snake_case : List[Any] = rescale_factor
snake_case : Dict = do_normalize
snake_case : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase ( self , A , A , A = False , A = 1 , A = PILImageResampling.BICUBIC , A = None , **A , ) -> np.ndarray:
snake_case : Any = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
snake_case : Optional[Any] = get_resize_output_image_size(
A , output_size=(size["""height"""], size["""width"""]) , keep_aspect_ratio=A , multiple=A , )
return resize(A , size=A , resample=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> List[Any]:
return rescale(A , scale=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A , A = None , **A , ) -> np.ndarray:
return normalize(A , mean=A , std=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> PIL.Image.Image:
snake_case : Optional[Any] = do_resize if do_resize is not None else self.do_resize
snake_case : str = size if size is not None else self.size
snake_case : List[str] = get_size_dict(A )
snake_case : Optional[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
snake_case : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
snake_case : Union[str, Any] = resample if resample is not None else self.resample
snake_case : List[str] = do_rescale if do_rescale is not None else self.do_rescale
snake_case : str = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
snake_case : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
snake_case : Optional[int] = image_std if image_std is not None else self.image_std
snake_case : Union[str, Any] = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case : Dict = [to_numpy_array(A ) for image in images]
if do_resize:
snake_case : Tuple = [self.resize(image=A , size=A , resample=A ) for image in images]
if do_rescale:
snake_case : List[str] = [self.rescale(image=A , scale=A ) for image in images]
if do_normalize:
snake_case : List[str] = [self.normalize(image=A , mean=A , std=A ) for image in images]
snake_case : str = [to_channel_dimension_format(A , A ) for image in images]
snake_case : int = {"""pixel_values""": images}
return BatchFeature(data=A , tensor_type=A )
def UpperCAmelCase ( self , A , A = None ) -> Dict:
snake_case : List[str] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A ) != len(A ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(A ):
snake_case : Optional[int] = target_sizes.numpy()
snake_case : Any = []
for idx in range(len(A ) ):
snake_case : Union[str, Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=A )
snake_case : str = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(A )
else:
snake_case : str = logits.argmax(dim=1 )
snake_case : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 684
|
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase : Any = logging.get_logger(__name__)
class __lowercase (enum.Enum ):
"""simple docstring"""
_snake_case = 0
_snake_case = 1
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """generated"""
def __init__( self , *A , **A ) -> Optional[Any]:
super().__init__(*A , **A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def UpperCAmelCase ( self , A=None , A=None , A=None , A=None , A=None , A=None , **A , ) -> Optional[int]:
snake_case : Tuple = {}
if truncation is not None:
snake_case : Union[str, Any] = truncation
snake_case : Dict = generate_kwargs
snake_case : int = {}
if return_tensors is not None and return_type is None:
snake_case : List[Any] = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
snake_case : List[str] = return_type
if clean_up_tokenization_spaces is not None:
snake_case : int = clean_up_tokenization_spaces
if stop_sequence is not None:
snake_case : Tuple = self.tokenizer.encode(A , add_special_tokens=A )
if len(A ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
snake_case : List[str] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase ( self , A , A , A ) -> Union[str, Any]:
return True
def UpperCAmelCase ( self , *A , A ) -> Tuple:
snake_case : Union[str, Any] = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , A ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
snake_case : Union[str, Any] = ([prefix + arg for arg in args[0]],)
snake_case : List[Any] = True
elif isinstance(args[0] , A ):
snake_case : str = (prefix + args[0],)
snake_case : str = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
snake_case : Optional[Any] = self.tokenizer(*A , padding=A , truncation=A , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *A , **A ) -> Union[str, Any]:
snake_case : Tuple = super().__call__(*A , **A )
if (
isinstance(args[0] , A )
and all(isinstance(A , A ) for el in args[0] )
and all(len(A ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def UpperCAmelCase ( self , A , A=TruncationStrategy.DO_NOT_TRUNCATE , **A ) -> str:
snake_case : Optional[Any] = self._parse_and_tokenize(A , truncation=A , **A )
return inputs
def UpperCAmelCase ( self , A , **A ) -> Tuple:
if self.framework == "pt":
snake_case , snake_case : List[str] = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
snake_case , snake_case : Optional[Any] = tf.shape(model_inputs["""input_ids"""] ).numpy()
snake_case : Dict = generate_kwargs.get("""min_length""" , self.model.config.min_length )
snake_case : str = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(A , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
snake_case : List[str] = self.model.generate(**A , **A )
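        # generate() stacks in_b * num_return_sequences sequences along the batch axis;
        # the reshape below groups each input's candidate outputs back together.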
snake_case : Dict = output_ids.shape[0]
if self.framework == "pt":
snake_case : List[Any] = output_ids.reshape(A , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
snake_case : Any = tf.reshape(A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def UpperCAmelCase ( self , A , A=ReturnType.TEXT , A=False ) -> Union[str, Any]:
snake_case : Tuple = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
snake_case : Dict = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
snake_case : int = {
f"""{self.return_name}_text""": self.tokenizer.decode(
A , skip_special_tokens=A , clean_up_tokenization_spaces=A , )
}
records.append(A )
return records
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """summary"""
def __call__( self , *A , **A ) -> str:
return super().__call__(*A , **A )
def UpperCAmelCase ( self , A , A , A ) -> bool:
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """translation"""
def UpperCAmelCase ( self , A , A , A ) -> Union[str, Any]:
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def UpperCAmelCase ( self , *A , A=TruncationStrategy.DO_NOT_TRUNCATE , A=None , A=None ) -> Optional[int]:
if getattr(self.tokenizer , """_build_translation_inputs""" , A ):
return self.tokenizer._build_translation_inputs(
*A , return_tensors=self.framework , truncation=A , src_lang=A , tgt_lang=A )
else:
return super()._parse_and_tokenize(*A , truncation=A )
def UpperCAmelCase ( self , A=None , A=None , **A ) -> Union[str, Any]:
snake_case , snake_case , snake_case : str = super()._sanitize_parameters(**A )
if src_lang is not None:
snake_case : Tuple = src_lang
if tgt_lang is not None:
snake_case : str = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
snake_case : Union[str, Any] = kwargs.get("""task""" , self.task )
snake_case : Any = task.split("""_""" )
if task and len(A ) == 4:
# translation, XX, to YY
snake_case : Optional[Any] = items[1]
snake_case : Dict = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *A , **A ) -> str:
return super().__call__(*A , **A )
| 684
| 1
|
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCamelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase : List[str] = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n >>> pipe_prior.to(\"cuda\")\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n >>> pipe.to(\"cuda\")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save(\"cat.png\")\n ```\n"
def _SCREAMING_SNAKE_CASE ( lowercase : Any , lowercase : str , lowercase : Any=8 ):
'''simple docstring'''
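    # Map the requested pixel size to the latent size: ceil(dim / scale_factor**2) * scale_factor.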
lowerCamelCase_ = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowerCamelCase_ = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class A( UpperCamelCase ):
'''simple docstring'''
def __init__( self : str , A_ : UNetaDConditionModel , A_ : DDPMScheduler , A_ : VQModel , ) -> List[str]:
"""simple docstring"""
super().__init__()
self.register_modules(
unet=A_ , scheduler=A_ , movq=A_ , )
lowerCamelCase_ = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def a__ ( self : List[Any] , A_ : Tuple , A_ : Dict , A_ : List[Any] , A_ : int , A_ : Any , A_ : Tuple ) -> Any:
"""simple docstring"""
if latents is None:
lowerCamelCase_ = randn_tensor(A_ , generator=A_ , device=A_ , dtype=A_ )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
lowerCamelCase_ = latents.to(A_ )
lowerCamelCase_ = latents * scheduler.init_noise_sigma
return latents
def a__ ( self : int , A_ : str=0 ) -> Optional[int]:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowerCamelCase_ = torch.device(f"""cuda:{gpu_id}""" )
lowerCamelCase_ = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(A_ , A_ )
def a__ ( self : Tuple , A_ : Union[str, Any]=0 ) -> Dict:
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
lowerCamelCase_ = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=A_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowerCamelCase_ = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowerCamelCase_ , lowerCamelCase_ = cpu_offload_with_hook(A_ , A_ , prev_module_hook=A_ )
# We'll offload the last model manually.
lowerCamelCase_ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def a__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(A_ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(A_ )
def __call__( self : List[Any] , A_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , A_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , A_ : int = 512 , A_ : int = 512 , A_ : int = 100 , A_ : float = 4.0 , A_ : int = 1 , A_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A_ : Optional[torch.FloatTensor] = None , A_ : Optional[str] = "pil" , A_ : bool = True , ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = self._execution_device
lowerCamelCase_ = guidance_scale > 1.0
if isinstance(A_ , A_ ):
lowerCamelCase_ = torch.cat(A_ , dim=0 )
lowerCamelCase_ = image_embeds.shape[0] * num_images_per_prompt
if isinstance(A_ , A_ ):
lowerCamelCase_ = torch.cat(A_ , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase_ = image_embeds.repeat_interleave(A_ , dim=0 )
lowerCamelCase_ = negative_image_embeds.repeat_interleave(A_ , dim=0 )
lowerCamelCase_ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=A_ )
self.scheduler.set_timesteps(A_ , device=A_ )
lowerCamelCase_ = self.scheduler.timesteps
lowerCamelCase_ = self.unet.config.in_channels
lowerCamelCase_ , lowerCamelCase_ = downscale_height_and_width(A_ , A_ , self.movq_scale_factor )
# create initial latent
lowerCamelCase_ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , A_ , A_ , A_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ = {'image_embeds': image_embeds}
lowerCamelCase_ = self.unet(
sample=A_ , timestep=A_ , encoder_hidden_states=A_ , added_cond_kwargs=A_ , return_dict=A_ , )[0]
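            # Classifier-free guidance: the batch holds [unconditional, conditional] predictions.
            # Split off the predicted variance, blend the two noise halves with guidance_scale,
            # then re-attach the conditional variance so the scheduler still receives both parts.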
if do_classifier_free_guidance:
lowerCamelCase_ , lowerCamelCase_ = noise_pred.split(latents.shape[1] , dim=1 )
lowerCamelCase_ , lowerCamelCase_ = noise_pred.chunk(2 )
lowerCamelCase_ , lowerCamelCase_ = variance_pred.chunk(2 )
lowerCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowerCamelCase_ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowerCamelCase_ , lowerCamelCase_ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ = self.scheduler.step(
A_ , A_ , A_ , generator=A_ , )[0]
# post-processing
lowerCamelCase_ = self.movq.decode(A_ , force_not_quantize=A_ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowerCamelCase_ = image * 0.5 + 0.5
lowerCamelCase_ = image.clamp(0 , 1 )
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCamelCase_ = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
| 70
|
'''simple docstring'''
__A : List[Any] = {
0: '0',
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
6: '6',
7: '7',
8: '8',
9: '9',
10: 'a',
11: 'b',
12: 'c',
13: 'd',
14: 'e',
15: 'f',
}
def UpperCAmelCase ( lowerCamelCase_ :float ):
'''simple docstring'''
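    # Repeated divmod by 16: each remainder becomes the next hexadecimal digit, built right to left.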
assert type(lowerCamelCase_ ) in (int, float) and decimal == int(lowerCamelCase_ )
snake_case_ : int = int(lowerCamelCase_ )
snake_case_ : int = """"""
snake_case_ : List[str] = False
if decimal < 0:
snake_case_ : Any = True
decimal *= -1
while decimal > 0:
snake_case_ , snake_case_ : List[str] = divmod(lowerCamelCase_ , 16 )
snake_case_ : Tuple = values[remainder] + hexadecimal
snake_case_ : Dict = """0x""" + hexadecimal
if negative:
snake_case_ : Optional[int] = """-""" + hexadecimal
return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
| 334
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections import deque
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : List[str] , UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
UpperCamelCase = []
self.adlist.append(
{'value': '', 'next_states': [], 'fail_state': 0, 'output': []} )
for keyword in keywords:
self.add_keyword(UpperCamelCase__ )
self.set_fail_transitions()
def A ( self : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def A ( self : int , UpperCamelCase__ : int ):
"""simple docstring"""
UpperCamelCase = 0
for character in keyword:
UpperCamelCase = self.find_next_state(UpperCamelCase__ , UpperCamelCase__ )
if next_state is None:
self.adlist.append(
{
'value': character,
'next_states': [],
'fail_state': 0,
'output': [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
UpperCamelCase = len(self.adlist ) - 1
else:
UpperCamelCase = next_state
self.adlist[current_state]["output"].append(UpperCamelCase__ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = deque()
for node in self.adlist[0]["next_states"]:
q.append(UpperCamelCase__ )
UpperCamelCase = 0
while q:
UpperCamelCase = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(UpperCamelCase__ )
UpperCamelCase = self.adlist[r]['fail_state']
while (
self.find_next_state(UpperCamelCase__ , self.adlist[child]['value'] ) is None
and state != 0
):
UpperCamelCase = self.adlist[state]['fail_state']
UpperCamelCase = self.find_next_state(
UpperCamelCase__ , self.adlist[child]['value'] )
if self.adlist[child]["fail_state"] is None:
UpperCamelCase = 0
UpperCamelCase = (
self.adlist[child]['output']
+ self.adlist[self.adlist[child]['fail_state']]['output']
)
def A ( self : Dict , UpperCamelCase__ : int ):
"""simple docstring"""
UpperCamelCase = {} # returns a dict with keywords and list of its occurrences
UpperCamelCase = 0
for i in range(len(UpperCamelCase__ ) ):
while (
self.find_next_state(UpperCamelCase__ , string[i] ) is None
and current_state != 0
):
UpperCamelCase = self.adlist[current_state]['fail_state']
UpperCamelCase = self.find_next_state(UpperCamelCase__ , string[i] )
if next_state is None:
UpperCamelCase = 0
else:
UpperCamelCase = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
UpperCamelCase = []
result[key].append(i - len(UpperCamelCase__ ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717
|
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
_lowerCamelCase : List[str] = datasets.load_iris()
_lowerCamelCase : Optional[Any] = np.array(data["data"])
_lowerCamelCase : Tuple = np.array(data["target"])
_lowerCamelCase : int = data["target_names"]
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase : Any = train_test_split(X, y)
def __lowerCamelCase ( A__ , A__ ) -> int:
"""simple docstring"""
return np.linalg.norm(np.array(A__ ) - np.array(A__ ) )
def __lowerCamelCase ( A__ , A__ , A__ , A__ , A__=5 ) -> Any:
"""simple docstring"""
UpperCamelCase = zip(A__ , A__ )
# List of distances of all points from the point to be classified
UpperCamelCase = []
for data_point in data:
UpperCamelCase = euclidean_distance(data_point[0] , A__ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
UpperCamelCase = [i[1] for i in sorted(A__ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
UpperCamelCase = Counter(A__ ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 324
| 0
|
def _snake_case ( __snake_case ):
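    # Recursive decimal-to-binary conversion: recurse on n // 2 and append the remainder n % 2 as the next bit.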
_UpperCamelCase = int(__snake_case )
if decimal in (0, 1): # Exit cases for the recursion
return str(__snake_case )
_UpperCamelCase , _UpperCamelCase = divmod(__snake_case , 2 )
return binary_recursive(__snake_case ) + str(__snake_case )
def _snake_case ( __snake_case ):
_UpperCamelCase = str(__snake_case ).strip()
if not number:
raise ValueError('''No input value was provided''' )
_UpperCamelCase = '''-''' if number.startswith('''-''' ) else ''''''
_UpperCamelCase = number.lstrip('''-''' )
if not number.isnumeric():
raise ValueError('''Input value is not an integer''' )
return f"""{negative}0b{binary_recursive(int(__snake_case ) )}"""
if __name__ == "__main__":
from doctest import testmod
testmod()
| 10
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Optional[Any] = logging.get_logger(__name__)
a : Dict = {
"""caidas/swin2sr-classicalsr-x2-64""": (
"""https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"""
),
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "swin2sr"
__lowerCamelCase = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , snake_case__=64 , snake_case__=1 , snake_case__=3 , snake_case__=180 , snake_case__=[6, 6, 6, 6, 6, 6] , snake_case__=[6, 6, 6, 6, 6, 6] , snake_case__=8 , snake_case__=2.0 , snake_case__=True , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__="gelu" , snake_case__=False , snake_case__=0.02 , snake_case__=1e-5 , snake_case__=2 , snake_case__=1.0 , snake_case__="1conv" , snake_case__="pixelshuffle" , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
lowercase__ : List[str]= image_size
lowercase__ : Union[str, Any]= patch_size
lowercase__ : List[Any]= num_channels
lowercase__ : int= embed_dim
lowercase__ : str= depths
lowercase__ : Optional[Any]= len(snake_case__ )
lowercase__ : Dict= num_heads
lowercase__ : int= window_size
lowercase__ : Optional[Any]= mlp_ratio
lowercase__ : Dict= qkv_bias
lowercase__ : str= hidden_dropout_prob
lowercase__ : Dict= attention_probs_dropout_prob
lowercase__ : Optional[int]= drop_path_rate
lowercase__ : Union[str, Any]= hidden_act
lowercase__ : List[Any]= use_absolute_embeddings
lowercase__ : str= layer_norm_eps
lowercase__ : Tuple= initializer_range
lowercase__ : Tuple= upscale
lowercase__ : Any= img_range
lowercase__ : Optional[int]= resi_connection
lowercase__ : List[Any]= upsampler
| 218
| 0
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
__A : List[Any] = logging.get_logger(__name__)
__A : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
__A : int = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
__A : Tuple = {
"allenai/led-base-16384": 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowercase ( ):
"""simple docstring"""
A__ : Union[str, Any] =(
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
A__ : Dict =bs[:]
A__ : Any =0
for b in range(2**8 ):
if b not in bs:
bs.append(UpperCamelCase )
cs.append(2**8 + n )
n += 1
A__ : str =[chr(UpperCamelCase ) for n in cs]
return dict(zip(UpperCamelCase , UpperCamelCase ) )
def lowercase ( UpperCamelCase : Optional[int] ):
"""simple docstring"""
A__ : Any =set()
A__ : str =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A__ : Dict =char
return pairs
class __lowerCAmelCase ( _UpperCamelCase):
'''simple docstring'''
__magic_name__ : Dict = VOCAB_FILES_NAMES
__magic_name__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : List[str] = ["""input_ids""", """attention_mask"""]
def __init__( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Any="replace" , UpperCamelCase__ : Dict="<s>" , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : Union[str, Any]="<s>" , UpperCamelCase__ : str="<unk>" , UpperCamelCase__ : int="<pad>" , UpperCamelCase__ : Optional[Any]="<mask>" , UpperCamelCase__ : Union[str, Any]=False , **UpperCamelCase__ : Dict , ):
A__ : Optional[Any] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else bos_token
A__ : List[str] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else eos_token
A__ : Dict =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else sep_token
A__ : Tuple =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cls_token
A__ : Optional[Any] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else unk_token
A__ : List[Any] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
A__ : int =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
super().__init__(
errors=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
with open(UpperCamelCase__ , encoding="utf-8" ) as vocab_handle:
A__ : Tuple =json.load(UpperCamelCase__ )
A__ : Dict ={v: k for k, v in self.encoder.items()}
A__ : Tuple =errors # how to handle errors in decoding
A__ : Optional[int] =bytes_to_unicode()
A__ : int ={v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase__ , encoding="utf-8" ) as merges_handle:
A__ : Dict =merges_handle.read().split("\n" )[1:-1]
A__ : str =[tuple(merge.split() ) for merge in bpe_merges]
A__ : Optional[Any] =dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
A__ : str ={}
A__ : Tuple =add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
A__ : List[str] =re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _UpperCAmelCase ( self : List[str] ):
return len(self.encoder )
def _UpperCAmelCase ( self : Any ):
return dict(self.encoder , **self.added_tokens_encoder )
def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Tuple ):
if token in self.cache:
return self.cache[token]
A__ : List[Any] =tuple(UpperCamelCase__ )
A__ : List[Any] =get_pairs(UpperCamelCase__ )
if not pairs:
return token
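        # Repeatedly merge the adjacent symbol pair with the lowest merge rank until no learned merge applies.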
while True:
A__ : str =min(UpperCamelCase__ , key=lambda UpperCamelCase__ : self.bpe_ranks.get(UpperCamelCase__ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
A__ , A__ : Optional[Any] =bigram
A__ : Optional[Any] =[]
A__ : Optional[Any] =0
while i < len(UpperCamelCase__ ):
try:
A__ : Optional[Any] =word.index(UpperCamelCase__ , UpperCamelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A__ : Dict =j
if word[i] == first and i < len(UpperCamelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A__ : Optional[int] =tuple(UpperCamelCase__ )
A__ : str =new_word
if len(UpperCamelCase__ ) == 1:
break
else:
A__ : str =get_pairs(UpperCamelCase__ )
A__ : str =" ".join(UpperCamelCase__ )
A__ : Tuple =word
return word
def _UpperCAmelCase ( self : str , UpperCamelCase__ : Optional[int] ):
A__ : Tuple =[]
for token in re.findall(self.pat , UpperCamelCase__ ):
A__ : List[Any] ="".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase__ ).split(" " ) )
return bpe_tokens
def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : str ):
return self.encoder.get(UpperCamelCase__ , self.encoder.get(self.unk_token ) )
def _UpperCAmelCase ( self : List[Any] , UpperCamelCase__ : int ):
return self.decoder.get(UpperCamelCase__ )
def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : Dict ):
A__ : Dict ="".join(UpperCamelCase__ )
A__ : Tuple =bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _UpperCAmelCase ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
A__ : List[str] =os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
A__ : Optional[int] =os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase__ , ensure_ascii=UpperCamelCase__ ) + "\n" )
A__ : Optional[Any] =0
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase__ : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
A__ : Optional[int] =token_index
writer.write(" ".join(UpperCamelCase__ ) + "\n" )
index += 1
return vocab_file, merge_file
def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A__ : Dict =[self.cls_token_id]
A__ : Any =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase__ )) + [1]
return [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] + ([0] * len(UpperCamelCase__ )) + [1]
def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
A__ : int =[self.sep_token_id]
A__ : Dict =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCAmelCase ( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any]=False , **UpperCamelCase__ : str ):
A__ : str =kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase__ ) > 0 and not text[0].isspace()):
A__ : Tuple =" " + text
return (text, kwargs)
def _UpperCAmelCase ( self : str , UpperCamelCase__ : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[bool] = None , ):
A__ : str =super()._pad(
encoded_inputs=UpperCamelCase__ , max_length=UpperCamelCase__ , padding_strategy=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , )
# Load from model defaults
if return_attention_mask is None:
A__ : Tuple ="attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
A__ : List[Any] =encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
A__ : int =len(encoded_inputs["global_attention_mask"] ) != len(UpperCamelCase__ )
if needs_to_be_padded:
A__ : Any =len(UpperCamelCase__ ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
A__ : List[str] =(
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
A__ : Union[str, Any] =[-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
| 595
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __lowerCAmelCase ( unittest.TestCase):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int=7 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : int=18 , UpperCamelCase__ : Union[str, Any]=30 , UpperCamelCase__ : Optional[Any]=400 , UpperCamelCase__ : str=True , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : str=[0.5, 0.5, 0.5] , UpperCamelCase__ : Optional[Any]=[0.5, 0.5, 0.5] , ):
A__ : List[Any] =size if size is not None else {"shortest_edge": 18}
A__ : str =crop_size if crop_size is not None else {"height": 18, "width": 18}
A__ : List[Any] =parent
A__ : List[str] =batch_size
A__ : List[Any] =num_channels
A__ : List[str] =image_size
A__ : Tuple =min_resolution
A__ : int =max_resolution
A__ : List[str] =do_resize
A__ : Union[str, Any] =size
A__ : str =do_center_crop
A__ : str =crop_size
A__ : Dict =do_normalize
A__ : List[Any] =image_mean
A__ : int =image_std
def _UpperCAmelCase ( self : Any ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __lowerCAmelCase ( _UpperCamelCase , unittest.TestCase):
'''simple docstring'''
__magic_name__ : Any = LevitImageProcessor if is_vision_available() else None
def _UpperCAmelCase ( self : List[Any] ):
A__ : Any =LevitImageProcessingTester(self )
@property
def _UpperCAmelCase ( self : Optional[int] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self : Any ):
A__ : Any =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , "image_mean" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "image_std" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_resize" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_center_crop" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "size" ) )
def _UpperCAmelCase ( self : str ):
A__ : Optional[int] =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
A__ : str =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def _UpperCAmelCase ( self : List[Any] ):
pass
def _UpperCAmelCase ( self : Dict ):
# Initialize image_processing
A__ : Optional[int] =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ : Optional[int] =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
A__ : Any =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A__ : Tuple =image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _UpperCAmelCase ( self : Dict ):
# Initialize image_processing
A__ : Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ : str =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
A__ : Dict =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A__ : Optional[int] =image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _UpperCAmelCase ( self : Any ):
# Initialize image_processing
A__ : List[str] =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ : Optional[int] =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
A__ : List[str] =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A__ : List[str] =image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 595
| 1
|
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
choices=["rag_sequence", "rag_token"],
required=True,
type=str,
help="RAG model type: rag_sequence, rag_token",
)
parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
parser.add_argument(
"--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
)
parser.add_argument(
"--generator_tokenizer_name_or_path",
type=str,
help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
)
parser.add_argument(
"--question_encoder_tokenizer_name_or_path",
type=str,
help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
)
parser.add_argument(
"--config_name_or_path",
type=str,
help=(
"Identifier of the model config to use, if not provided, resolves to a base config for a given"
" ``model_type``"
),
)
SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
SCREAMING_SNAKE_CASE : Union[str, Any] = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
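# Example invocation (a sketch only; the script filename and the checkpoint identifiers below are
# illustrative assumptions, not taken from this file):
#
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --dest ./rag-sequence-checkpoint \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base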
| 141
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16_000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def lowercase_ ( self ):
__snake_case : Dict = self.get_tokenizer()
__snake_case : Optional[Any] = self.get_feature_extractor()
__snake_case : Optional[int] = self.get_decoder()
__snake_case : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , decoder=_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
__snake_case : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _UpperCAmelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _UpperCAmelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[int] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__snake_case : Optional[int] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(_UpperCAmelCase , 'include' ):
WavaVecaProcessorWithLM(
tokenizer=_UpperCAmelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.get_feature_extractor()
__snake_case : int = self.get_tokenizer()
__snake_case : int = self.get_decoder()
__snake_case : Any = WavaVecaProcessorWithLM(tokenizer=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , decoder=_UpperCAmelCase )
__snake_case : Union[str, Any] = floats_list((3, 1_000) )
__snake_case : Optional[Any] = feature_extractor(_UpperCAmelCase , return_tensors='np' )
__snake_case : Dict = processor(_UpperCAmelCase , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowercase_ ( self ):
__snake_case : List[str] = self.get_feature_extractor()
__snake_case : Optional[Any] = self.get_tokenizer()
__snake_case : Any = self.get_decoder()
__snake_case : int = WavaVecaProcessorWithLM(tokenizer=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , decoder=_UpperCAmelCase )
__snake_case : str = 'This is a test string'
__snake_case : Optional[int] = processor(text=_UpperCAmelCase )
__snake_case : int = tokenizer(_UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.get_feature_extractor()
__snake_case : Any = self.get_tokenizer()
__snake_case : Dict = self.get_decoder()
__snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , decoder=_UpperCAmelCase )
__snake_case : Optional[Any] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__snake_case : int = processor.decode(_UpperCAmelCase )
__snake_case : Tuple = decoder.decode_beams(_UpperCAmelCase )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('</s> <s> </s>' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['fork'], ['spawn']] )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Union[str, Any] = self.get_feature_extractor()
__snake_case : Optional[int] = self.get_tokenizer()
__snake_case : Union[str, Any] = self.get_decoder()
__snake_case : Any = WavaVecaProcessorWithLM(tokenizer=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , decoder=_UpperCAmelCase )
__snake_case : List[str] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__snake_case : List[Any] = processor.batch_decode(_UpperCAmelCase )
else:
with get_context(_UpperCAmelCase ).Pool() as pool:
__snake_case : Tuple = processor.batch_decode(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : List[str] = list(_UpperCAmelCase )
with get_context('fork' ).Pool() as p:
__snake_case : List[Any] = decoder.decode_beams_batch(_UpperCAmelCase , _UpperCAmelCase )
__snake_case , __snake_case , __snake_case : Optional[Any] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_UpperCAmelCase , decoded_processor.text )
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text )
self.assertListEqual(_UpperCAmelCase , decoded_processor.logit_score )
self.assertListEqual(_UpperCAmelCase , decoded_processor.lm_score )
def lowercase_ ( self ):
__snake_case : Tuple = self.get_feature_extractor()
__snake_case : Dict = self.get_tokenizer()
__snake_case : int = self.get_decoder()
__snake_case : List[Any] = WavaVecaProcessorWithLM(tokenizer=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , decoder=_UpperCAmelCase )
__snake_case : int = self._get_dummy_logits()
__snake_case : Tuple = 15
__snake_case : int = -20.0
__snake_case : Optional[Any] = -4.0
__snake_case : int = processor.batch_decode(
_UpperCAmelCase , beam_width=_UpperCAmelCase , beam_prune_logp=_UpperCAmelCase , token_min_logp=_UpperCAmelCase , )
__snake_case : int = decoded_processor_out.text
__snake_case : Dict = list(_UpperCAmelCase )
with get_context('fork' ).Pool() as pool:
__snake_case : Tuple = decoder.decode_beams_batch(
_UpperCAmelCase , _UpperCAmelCase , beam_width=_UpperCAmelCase , beam_prune_logp=_UpperCAmelCase , token_min_logp=_UpperCAmelCase , )
__snake_case : List[str] = [d[0][0] for d in decoded_decoder_out]
__snake_case : Any = [d[0][2] for d in decoded_decoder_out]
__snake_case : Optional[int] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , _UpperCAmelCase )
self.assertTrue(np.array_equal(_UpperCAmelCase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , _UpperCAmelCase , atol=1E-3 ) )
self.assertTrue(np.array_equal(_UpperCAmelCase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , _UpperCAmelCase , atol=1E-3 ) )
def lowercase_ ( self ):
__snake_case : List[Any] = self.get_feature_extractor()
__snake_case : Optional[Any] = self.get_tokenizer()
__snake_case : int = self.get_decoder()
__snake_case : int = WavaVecaProcessorWithLM(tokenizer=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , decoder=_UpperCAmelCase )
__snake_case : Dict = self._get_dummy_logits()
__snake_case : List[str] = 2.0
__snake_case : Union[str, Any] = 5.0
__snake_case : List[str] = -20.0
__snake_case : Tuple = True
__snake_case : List[Any] = processor.batch_decode(
_UpperCAmelCase , alpha=_UpperCAmelCase , beta=_UpperCAmelCase , unk_score_offset=_UpperCAmelCase , lm_score_boundary=_UpperCAmelCase , )
__snake_case : Tuple = decoded_processor_out.text
__snake_case : List[str] = list(_UpperCAmelCase )
decoder.reset_params(
alpha=_UpperCAmelCase , beta=_UpperCAmelCase , unk_score_offset=_UpperCAmelCase , lm_score_boundary=_UpperCAmelCase , )
with get_context('fork' ).Pool() as pool:
__snake_case : str = decoder.decode_beams_batch(
_UpperCAmelCase , _UpperCAmelCase , )
__snake_case : List[str] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , _UpperCAmelCase )
__snake_case : int = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Tuple = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case : Any = processor.decoder.model_container[processor.decoder._model_key]
__snake_case : List[str] = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__snake_case : List[Any] = os.listdir(_UpperCAmelCase )
__snake_case : Tuple = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : str = snapshot_download('hf-internal-testing/processor_with_lm' )
__snake_case : List[str] = WavaVecaProcessorWithLM.from_pretrained(_UpperCAmelCase )
__snake_case : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
__snake_case : Optional[int] = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__snake_case : List[str] = os.listdir(_UpperCAmelCase )
__snake_case : Tuple = os.listdir(_UpperCAmelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Any = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case : str = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case : Dict = floats_list((3, 1_000) )
__snake_case : Dict = processor_wavaveca(_UpperCAmelCase , return_tensors='np' )
__snake_case : str = processor_auto(_UpperCAmelCase , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__snake_case : Optional[int] = self._get_dummy_logits()
__snake_case : Optional[Any] = processor_wavaveca.batch_decode(_UpperCAmelCase )
__snake_case : Optional[int] = processor_auto.batch_decode(_UpperCAmelCase )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowercase_ ( self ):
__snake_case : List[str] = self.get_feature_extractor()
__snake_case : Tuple = self.get_tokenizer()
__snake_case : Optional[Any] = self.get_decoder()
__snake_case : List[str] = WavaVecaProcessorWithLM(tokenizer=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , decoder=_UpperCAmelCase )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
@staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def lowercase_ ( self ):
__snake_case : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case : List[Any] = self._get_dummy_logits()[0]
__snake_case : Dict = processor.decode(_UpperCAmelCase , output_word_offsets=_UpperCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
def lowercase_ ( self ):
__snake_case : Dict = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case : Tuple = self._get_dummy_logits()
__snake_case : str = processor.batch_decode(_UpperCAmelCase , output_word_offsets=_UpperCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(_UpperCAmelCase , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowercase_ ( self ):
import torch
__snake_case : Tuple = load_dataset('common_voice' , 'en' , split='train' , streaming=_UpperCAmelCase )
__snake_case : Any = ds.cast_column('audio' , datasets.Audio(sampling_rate=16_000 ) )
__snake_case : Tuple = iter(_UpperCAmelCase )
__snake_case : Optional[Any] = next(_UpperCAmelCase )
__snake_case : str = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
__snake_case : Any = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__snake_case : Optional[int] = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
with torch.no_grad():
__snake_case : Optional[int] = model(_UpperCAmelCase ).logits.cpu().numpy()
__snake_case : Dict = processor.decode(logits[0] , output_word_offsets=_UpperCAmelCase )
__snake_case : Union[str, Any] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__snake_case : List[Any] = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
__snake_case : Tuple = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(_UpperCAmelCase , 'word' ) ) , _UpperCAmelCase )
self.assertEqual(' '.join(self.get_from_offsets(_UpperCAmelCase , 'word' ) ) , output.text )
# output times
__snake_case : Dict = torch.tensor(self.get_from_offsets(_UpperCAmelCase , 'start_time' ) )
__snake_case : Optional[int] = torch.tensor(self.get_from_offsets(_UpperCAmelCase , 'end_time' ) )
# fmt: off
__snake_case : List[Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__snake_case : Optional[Any] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=0.01 ) )
self.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=0.01 ) )
| 576
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
    model_type = "bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
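# Sketch of the resulting dynamic export axes (illustrative; the class names below assume the
# upstream BertConfig / BertOnnxConfig naming rather than the mangled names used in this file):
#   onnx_config = BertOnnxConfig(BertConfig())
#   dict(onnx_config.inputs)
#   -> {"input_ids": {0: "batch", 1: "sequence"},
#       "attention_mask": {0: "batch", 1: "sequence"},
#       "token_type_ids": {0: "batch", 1: "sequence"}}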
| 702
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
    model_type = "roberta"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 669
| 0
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__a)
class _UpperCAmelCase ( __a):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
__a : str = field(default="""text-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True})
__a : ClassVar[Features] = Features({"""text""": Value("""string""")})
__a : ClassVar[Features] = Features({"""labels""": ClassLabel})
__a : str = "text"
__a : str = "labels"
    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
@property
def __snake_case ( self ) -> Dict[str, str]:
'''simple docstring'''
return {
self.text_column: "text",
self.label_column: "labels",
}
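# Minimal usage sketch (the label names below are illustrative assumptions; upstream this class
# corresponds to `datasets.tasks.TextClassification`):
#   features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   task = TextClassification(text_column="text", label_column="labels")
#   task = task.align_with_features(features)  # copies the concrete ClassLabel into label_schema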
| 238
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class _UpperCAmelCase ( __a):
__a : Optional[Any] = """SpeechT5FeatureExtractor"""
__a : Dict = """SpeechT5Tokenizer"""
def __init__( self , _A , _A ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(_A , _A )
def __call__( self , *_A , **_A ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Any = kwargs.pop("""audio""" , _A )
_UpperCAmelCase : Tuple = kwargs.pop("""text""" , _A )
_UpperCAmelCase : Any = kwargs.pop("""text_target""" , _A )
_UpperCAmelCase : Optional[Any] = kwargs.pop("""audio_target""" , _A )
_UpperCAmelCase : Any = kwargs.pop("""sampling_rate""" , _A )
if audio is not None and text is not None:
raise ValueError(
"""Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?""" )
if audio_target is not None and text_target is not None:
raise ValueError(
"""Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?""" )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
"""You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.""" )
if audio is not None:
_UpperCAmelCase : Optional[Any] = self.feature_extractor(_A , *_A , sampling_rate=_A , **_A )
elif text is not None:
_UpperCAmelCase : List[str] = self.tokenizer(_A , **_A )
else:
_UpperCAmelCase : Optional[int] = None
if audio_target is not None:
_UpperCAmelCase : List[Any] = self.feature_extractor(audio_target=_A , *_A , sampling_rate=_A , **_A )
_UpperCAmelCase : Union[str, Any] = targets["""input_values"""]
elif text_target is not None:
_UpperCAmelCase : Optional[int] = self.tokenizer(_A , **_A )
_UpperCAmelCase : Union[str, Any] = targets["""input_ids"""]
else:
_UpperCAmelCase : List[Any] = None
if inputs is None:
return targets
if targets is not None:
_UpperCAmelCase : List[str] = labels
_UpperCAmelCase : List[str] = targets.get("""attention_mask""" )
if decoder_attention_mask is not None:
_UpperCAmelCase : Optional[int] = decoder_attention_mask
return inputs
def __snake_case ( self , *_A , **_A ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : str = kwargs.pop("""input_values""" , _A )
_UpperCAmelCase : List[Any] = kwargs.pop("""input_ids""" , _A )
_UpperCAmelCase : int = kwargs.pop("""labels""" , _A )
if input_values is not None and input_ids is not None:
raise ValueError("""Cannot process both `input_values` and `input_ids` inputs.""" )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
"""You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.""" )
if input_values is not None:
_UpperCAmelCase : Optional[int] = self.feature_extractor.pad(_A , *_A , **_A )
elif input_ids is not None:
_UpperCAmelCase : Tuple = self.tokenizer.pad(_A , **_A )
else:
_UpperCAmelCase : Any = None
if labels is not None:
if "input_ids" in labels or (isinstance(_A , _A ) and "input_ids" in labels[0]):
_UpperCAmelCase : Optional[Any] = self.tokenizer.pad(_A , **_A )
_UpperCAmelCase : Optional[Any] = targets["""input_ids"""]
else:
_UpperCAmelCase : List[Any] = self.feature_extractor.feature_size
_UpperCAmelCase : Tuple = self.feature_extractor.num_mel_bins
_UpperCAmelCase : List[str] = self.feature_extractor.pad(_A , *_A , **_A )
_UpperCAmelCase : List[Any] = feature_size_hack
_UpperCAmelCase : Dict = targets["""input_values"""]
else:
_UpperCAmelCase : Optional[Any] = None
if inputs is None:
return targets
if targets is not None:
_UpperCAmelCase : Union[str, Any] = labels
_UpperCAmelCase : Dict = targets.get("""attention_mask""" )
if decoder_attention_mask is not None:
_UpperCAmelCase : Optional[Any] = decoder_attention_mask
return inputs
def __snake_case ( self , *_A , **_A ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*_A , **_A )
def __snake_case ( self , *_A , **_A ) -> Union[str, Any]:
'''simple docstring'''
return self.tokenizer.decode(*_A , **_A )
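# Usage sketch (hedged: identifiers follow the upstream SpeechT5 processor API rather than the
# mangled names above; the checkpoint id and `waveform` variable are assumptions for illustration):
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   inputs = processor(text="Hello world", return_tensors="pt")                              # tokenizer path
#   targets = processor(audio_target=waveform, sampling_rate=16_000, return_tensors="pt")    # feature-extractor path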
| 238
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase__ = {
'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
'tokenization_canine': ['CanineTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
'CanineForMultipleChoice',
'CanineForQuestionAnswering',
'CanineForSequenceClassification',
'CanineForTokenClassification',
'CanineLayer',
'CanineModel',
'CaninePreTrainedModel',
'load_tf_weights_in_canine',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 217
|
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
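# The loop above applies the Newton-Raphson update x_{n+1} = x_n - f(x_n) / f'(x_n),
# repeating until |f(x_n)| < precision; `diff(func)` supplies f' symbolically via sympy.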
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
# Find Square Root of 5
print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
# Exponential Roots
print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
| 217
| 1
|
'''simple docstring'''
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
UpperCamelCase_ = open # noqa: we just need to have a builtin inside this module to test it properly
| 92
|
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
UpperCamelCase_ = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
UpperCamelCase_ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
UpperCamelCase_ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    # Score an item by counting how many characters already match the target position-wise.
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    # Slice both parents at a random point and swap the tails.
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    # Replace one random gene of the child with probability MUTATION_PROBABILITY.
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str]) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new strings to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 92
| 1
|
'''simple docstring'''
from manim import *
class UpperCAmelCase ( lowercase_):
"""simple docstring"""
def UpperCamelCase__ ( self : List[str] ) -> str:
_UpperCamelCase =Rectangle(height=0.5 , width=0.5 )
_UpperCamelCase =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_UpperCamelCase =[mem.copy() for i in range(6 )]
_UpperCamelCase =[mem.copy() for i in range(6 )]
_UpperCamelCase =VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
_UpperCamelCase =VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
_UpperCamelCase =VGroup(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
_UpperCamelCase =Text('''CPU''' , font_size=24 )
_UpperCamelCase =Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCamelCase__ )
_UpperCamelCase =[mem.copy() for i in range(1 )]
_UpperCamelCase =VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
_UpperCamelCase =Text('''GPU''' , font_size=24 )
_UpperCamelCase =Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
gpu.align_to(UpperCamelCase__ , UpperCamelCase__ )
gpu.set_x(gpu.get_x() - 1 )
self.add(UpperCamelCase__ )
_UpperCamelCase =[mem.copy() for i in range(6 )]
_UpperCamelCase =VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
_UpperCamelCase =Text('''Model''' , font_size=24 )
_UpperCamelCase =Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
model.move_to([3, -1.0, 0] )
self.play(
Create(UpperCamelCase__ , run_time=1 ) , Create(UpperCamelCase__ , run_time=1 ) , Create(UpperCamelCase__ , run_time=1 ) , )
_UpperCamelCase =MarkupText(
F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
_UpperCamelCase =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_UpperCamelCase =MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCamelCase__ , run_time=2.5 ) , Write(UpperCamelCase__ ) , Write(UpperCamelCase__ ) )
self.add(UpperCamelCase__ )
_UpperCamelCase =[]
_UpperCamelCase =[]
_UpperCamelCase =[]
for i, rect in enumerate(UpperCamelCase__ ):
_UpperCamelCase =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCamelCase__ , opacity=0.7 )
cpu_target.move_to(UpperCamelCase__ )
cpu_target.generate_target()
_UpperCamelCase =0.46 / 4
_UpperCamelCase =0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCamelCase__ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=UpperCamelCase__ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=UpperCamelCase__ , buff=0.0 )
cpu_targs.append(UpperCamelCase__ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(UpperCamelCase__ ) )
second_animations.append(MoveToTarget(UpperCamelCase__ , run_time=1.5 ) )
self.play(*UpperCamelCase__ )
self.play(*UpperCamelCase__ )
self.wait()
| 271
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase : Optional[int] = {
'configuration_xmod': [
'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XmodConfig',
'XmodOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = [
'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
'XmodForCausalLM',
'XmodForMaskedLM',
'XmodForMultipleChoice',
'XmodForQuestionAnswering',
'XmodForSequenceClassification',
'XmodForTokenClassification',
'XmodModel',
'XmodPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 271
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : "DiagonalGaussianDistribution"
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = True
@register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), up_block_types: Tuple[str] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 4, norm_num_groups: int = 32, sample_size: int = 32, scaling_factor: float = 0.18215):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=True, )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, norm_num_groups=norm_num_groups, act_fn=act_fn, )
        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)
        self.use_slicing = False
        self.use_tiling = False
        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : str ,lowercase__ : str=False ):
if isinstance(lowercase__ ,(Encoder, Decoder) ):
__lowercase = value
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : bool = True ):
__lowercase = use_tiling
def SCREAMING_SNAKE_CASE ( self : int ):
self.enable_tiling(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = True
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = {}
def fn_recursive_add_processors(lowercase__ : str ,lowercase__ : torch.nn.Module ,lowercase__ : Dict[str, AttentionProcessor] ):
if hasattr(lowercase__ ,'''set_processor''' ):
__lowercase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"{name}.{sub_name}" ,lowercase__ ,lowercase__ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowercase__ ,lowercase__ ,lowercase__ )
return processors
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
__lowercase = len(self.attn_processors.keys() )
if isinstance(lowercase__ ,lowercase__ ) and len(lowercase__ ) != count:
raise ValueError(
F"A dict of processors was passed, but the number of processors {len(lowercase__ )} does not match the"
F" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(lowercase__ : str ,lowercase__ : torch.nn.Module ,lowercase__ : Tuple ):
if hasattr(lowercase__ ,'''set_processor''' ):
if not isinstance(lowercase__ ,lowercase__ ):
module.set_processor(lowercase__ )
else:
module.set_processor(processor.pop(F"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"{name}.{sub_name}" ,lowercase__ ,lowercase__ )
for name, module in self.named_children():
fn_recursive_attn_processor(lowercase__ ,lowercase__ ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ):
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : torch.FloatTensor ,lowercase__ : bool = True ):
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(lowercase__ ,return_dict=lowercase__ )
if self.use_slicing and x.shape[0] > 1:
__lowercase = [self.encoder(lowercase__ ) for x_slice in x.split(1 )]
__lowercase = torch.cat(lowercase__ )
else:
__lowercase = self.encoder(lowercase__ )
__lowercase = self.quant_conv(lowercase__ )
__lowercase = DiagonalGaussianDistribution(lowercase__ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : torch.FloatTensor ,lowercase__ : bool = True ):
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(lowercase__ ,return_dict=lowercase__ )
__lowercase = self.post_quant_conv(lowercase__ )
__lowercase = self.decoder(lowercase__ )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase__ )
@apply_forward_hook
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : torch.FloatTensor ,lowercase__ : bool = True ):
if self.use_slicing and z.shape[0] > 1:
__lowercase = [self._decode(lowercase__ ).sample for z_slice in z.split(1 )]
__lowercase = torch.cat(lowercase__ )
else:
__lowercase = self._decode(lowercase__ ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Tuple ,lowercase__ : Optional[int] ,lowercase__ : Tuple ):
__lowercase = min(a.shape[2] ,b.shape[2] ,lowercase__ )
for y in range(lowercase__ ):
__lowercase = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Any ,lowercase__ : Union[str, Any] ,lowercase__ : Any ):
__lowercase = min(a.shape[3] ,b.shape[3] ,lowercase__ )
for x in range(lowercase__ ):
__lowercase = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : torch.FloatTensor ,lowercase__ : bool = True ):
__lowercase = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
__lowercase = int(self.tile_latent_min_size * self.tile_overlap_factor )
__lowercase = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
__lowercase = []
for i in range(0 ,x.shape[2] ,lowercase__ ):
__lowercase = []
for j in range(0 ,x.shape[3] ,lowercase__ ):
__lowercase = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
__lowercase = self.encoder(lowercase__ )
__lowercase = self.quant_conv(lowercase__ )
row.append(lowercase__ )
rows.append(lowercase__ )
__lowercase = []
for i, row in enumerate(lowercase__ ):
__lowercase = []
for j, tile in enumerate(lowercase__ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__lowercase = self.blend_v(rows[i - 1][j] ,lowercase__ ,lowercase__ )
if j > 0:
__lowercase = self.blend_h(row[j - 1] ,lowercase__ ,lowercase__ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(lowercase__ ,dim=3 ) )
__lowercase = torch.cat(lowercase__ ,dim=2 )
__lowercase = DiagonalGaussianDistribution(lowercase__ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : torch.FloatTensor ,lowercase__ : bool = True ):
__lowercase = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
__lowercase = int(self.tile_sample_min_size * self.tile_overlap_factor )
__lowercase = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
__lowercase = []
for i in range(0 ,z.shape[2] ,lowercase__ ):
__lowercase = []
for j in range(0 ,z.shape[3] ,lowercase__ ):
__lowercase = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
__lowercase = self.post_quant_conv(lowercase__ )
__lowercase = self.decoder(lowercase__ )
row.append(lowercase__ )
rows.append(lowercase__ )
__lowercase = []
for i, row in enumerate(lowercase__ ):
__lowercase = []
for j, tile in enumerate(lowercase__ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__lowercase = self.blend_v(rows[i - 1][j] ,lowercase__ ,lowercase__ )
if j > 0:
__lowercase = self.blend_h(row[j - 1] ,lowercase__ ,lowercase__ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(lowercase__ ,dim=3 ) )
__lowercase = torch.cat(lowercase__ ,dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : torch.FloatTensor ,lowercase__ : bool = False ,lowercase__ : bool = True ,lowercase__ : Optional[torch.Generator] = None ,):
__lowercase = sample
__lowercase = self.encode(lowercase__ ).latent_dist
if sample_posterior:
__lowercase = posterior.sample(generator=lowercase__ )
else:
__lowercase = posterior.mode()
__lowercase = self.decode(lowercase__ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase__ )
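# A minimal usage sketch, assuming the class above corresponds to diffusers' AutoencoderKL:
# the tiling/slicing switches defined above are toggled before encoding large inputs.
# The model id and tensor shape are illustrative assumptions, not taken from this file.
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
vae.enable_tiling()   # tiled encode/decode so large images fit in memory
vae.enable_slicing()  # process the batch one sample at a time
image = torch.randn(1, 3, 1024, 1024)
latents = vae.encode(image).latent_dist.sample()
reconstruction = vae.decode(latents).sample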
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'''
)
a__ = None
a__ = {
'''7B''': 1_1008,
'''13B''': 1_3824,
'''30B''': 1_7920,
'''65B''': 2_2016,
'''70B''': 2_8672,
}
a__ = {
'''7B''': 1,
'''7Bf''': 1,
'''13B''': 2,
'''13Bf''': 2,
'''30B''': 4,
'''65B''': 8,
'''70B''': 8,
'''70Bf''': 8,
}
def compute_intermediate_size(n: int, ffn_dim_multiplier: float = 1, multiple_of: int = 256) -> int:
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
def read_json(path: str) -> dict:
    with open(path, "r") as f:
        return json.load(f)
def write_json(text, path: str) -> None:
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
os.makedirs(snake_case , exist_ok=snake_case )
__UpperCamelCase : Optional[int] = os.path.join(snake_case , """tmp""" )
os.makedirs(snake_case , exist_ok=snake_case )
__UpperCamelCase : Any = read_json(os.path.join(snake_case , """params.json""" ) )
__UpperCamelCase : Dict = NUM_SHARDS[model_size]
__UpperCamelCase : List[Any] = params["""n_layers"""]
__UpperCamelCase : Any = params["""n_heads"""]
__UpperCamelCase : Optional[int] = n_heads // num_shards
__UpperCamelCase : Tuple = params["""dim"""]
__UpperCamelCase : Optional[int] = dim // n_heads
__UpperCamelCase : Dict = 10000.0
__UpperCamelCase : str = 1.0 / (base ** (torch.arange(0 , snake_case , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
__UpperCamelCase : Any = params["""n_kv_heads"""] # for GQA / MQA
__UpperCamelCase : Optional[int] = n_heads_per_shard // num_key_value_heads
__UpperCamelCase : List[Any] = dim // num_key_value_heads
else: # compatibility with other checkpoints
__UpperCamelCase : Optional[Any] = n_heads
__UpperCamelCase : Any = n_heads_per_shard
__UpperCamelCase : str = dim
# permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
print(F'''Fetching all parameters from the checkpoint at {input_base_path}.''' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
__UpperCamelCase : Any = torch.load(os.path.join(snake_case , """consolidated.00.pth""" ) , map_location="""cpu""" )
else:
# Sharded
__UpperCamelCase : Optional[int] = [
torch.load(os.path.join(snake_case , F'''consolidated.{i:02d}.pth''' ) , map_location="""cpu""" )
for i in range(snake_case )
]
__UpperCamelCase : Optional[int] = 0
__UpperCamelCase : List[str] = {"""weight_map""": {}}
for layer_i in range(snake_case ):
__UpperCamelCase : Tuple = F'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
__UpperCamelCase : Optional[Any] = {
F'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
loaded[F'''layers.{layer_i}.attention.wq.weight'''] ),
F'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
loaded[F'''layers.{layer_i}.attention.wk.weight'''] ),
F'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[F'''layers.{layer_i}.attention.wv.weight'''],
F'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[F'''layers.{layer_i}.attention.wo.weight'''],
F'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w1.weight'''],
F'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w2.weight'''],
F'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w3.weight'''],
F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[F'''layers.{layer_i}.attention_norm.weight'''],
F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[F'''layers.{layer_i}.ffn_norm.weight'''],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
__UpperCamelCase : int = {
F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
F'''layers.{layer_i}.attention_norm.weight'''
].clone(),
F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
F'''layers.{layer_i}.ffn_norm.weight'''
].clone(),
}
__UpperCamelCase : Union[str, Any] = permute(
torch.cat(
[
loaded[i][F'''layers.{layer_i}.attention.wq.weight'''].view(snake_case , snake_case , snake_case )
for i in range(snake_case )
] , dim=0 , ).reshape(snake_case , snake_case ) )
__UpperCamelCase : int = permute(
torch.cat(
[
loaded[i][F'''layers.{layer_i}.attention.wk.weight'''].view(
snake_case , snake_case , snake_case )
for i in range(snake_case )
] , dim=0 , ).reshape(snake_case , snake_case ) , snake_case , snake_case , snake_case , )
__UpperCamelCase : Dict = torch.cat(
[
loaded[i][F'''layers.{layer_i}.attention.wv.weight'''].view(
snake_case , snake_case , snake_case )
for i in range(snake_case )
] , dim=0 , ).reshape(snake_case , snake_case )
__UpperCamelCase : str = torch.cat(
[loaded[i][F'''layers.{layer_i}.attention.wo.weight'''] for i in range(snake_case )] , dim=1 )
__UpperCamelCase : Dict = torch.cat(
[loaded[i][F'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(snake_case )] , dim=0 )
__UpperCamelCase : Dict = torch.cat(
[loaded[i][F'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(snake_case )] , dim=1 )
__UpperCamelCase : List[str] = torch.cat(
[loaded[i][F'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(snake_case )] , dim=0 )
__UpperCamelCase : Dict = inv_freq
for k, v in state_dict.items():
__UpperCamelCase : str = filename
param_count += v.numel()
torch.save(snake_case , os.path.join(snake_case , snake_case ) )
__UpperCamelCase : Dict = F'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
__UpperCamelCase : Dict = {
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
"""model.norm.weight""": loaded["""norm.weight"""],
"""lm_head.weight""": loaded["""output.weight"""],
}
else:
__UpperCamelCase : Dict = {
"""model.norm.weight""": loaded[0]["""norm.weight"""],
"""model.embed_tokens.weight""": torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(snake_case )] , dim=1 ),
"""lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(snake_case )] , dim=0 ),
}
for k, v in state_dict.items():
__UpperCamelCase : str = filename
param_count += v.numel()
torch.save(snake_case , os.path.join(snake_case , snake_case ) )
# Write configs
__UpperCamelCase : Union[str, Any] = {"""total_size""": param_count * 2}
write_json(snake_case , os.path.join(snake_case , """pytorch_model.bin.index.json""" ) )
__UpperCamelCase : str = params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
__UpperCamelCase : str = params["""multiple_of"""] if """multiple_of""" in params else 2_56
__UpperCamelCase : str = LlamaConfig(
hidden_size=snake_case , intermediate_size=compute_intermediate_size(snake_case , snake_case , snake_case ) , num_attention_heads=params["""n_heads"""] , num_hidden_layers=params["""n_layers"""] , rms_norm_eps=params["""norm_eps"""] , num_key_value_heads=snake_case , )
config.save_pretrained(snake_case )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("""Loading the checkpoint in a Llama model.""" )
__UpperCamelCase : Optional[int] = LlamaForCausalLM.from_pretrained(snake_case , torch_dtype=torch.floataa , low_cpu_mem_usage=snake_case )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("""Saving in the Transformers format.""" )
model.save_pretrained(snake_case , safe_serialization=snake_case )
shutil.rmtree(snake_case )
def write_tokenizer(tokenizer_path, input_tokenizer_path):
# Initialize the tokenizer based on the `spm` model
__UpperCamelCase : Union[str, Any] = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(F'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' )
__UpperCamelCase : Tuple = tokenizer_class(snake_case )
tokenizer.save_pretrained(snake_case )
def main():
__UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--input_dir""" , help="""Location of LLaMA weights, which contains tokenizer.model and model folders""" , )
parser.add_argument(
"""--model_size""" , choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""] , )
parser.add_argument(
"""--output_dir""" , help="""Location to write HF model and tokenizer""" , )
parser.add_argument("""--safe_serialization""" , type=snake_case , help="""Whether or not to save using `safetensors`.""" )
__UpperCamelCase : str = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
__UpperCamelCase : int = os.path.join(args.input_dir , """tokenizer.model""" )
write_tokenizer(args.output_dir , snake_case )
if __name__ == "__main__":
main()
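# A hedged usage note: a typical invocation of this conversion script, assuming the original
# LLaMA checkpoint layout (params.json, consolidated.*.pth, tokenizer.model) under --input_dir.
# The script filename and paths are placeholders.
#
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama/weights \
#       --model_size 7B \
#       --output_dir /path/to/llama-7b-hf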
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : str = {
'facebook/data2vec-vision-base-ft': (
'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'
),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )
    @property
    def atol_for_validation(self):
        return 1e-4
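# A minimal usage sketch, assuming the config class above matches the released
# transformers Data2VecVisionConfig; the override values are illustrative.
from transformers import Data2VecVisionConfig

config = Data2VecVisionConfig(image_size=224, patch_size=16, out_indices=[3, 5, 7, 11])
print(config.model_type, config.use_mean_pooling)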
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Any = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
for attribute in key.split(""".""" ):
__a : str = getattr(lowercase , lowercase )
if weight_type is not None:
__a : Dict = getattr(lowercase , lowercase ).shape
else:
__a : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__a : Any = value
elif weight_type == "weight_g":
__a : int = value
elif weight_type == "weight_v":
__a : int = value
elif weight_type == "bias":
__a : List[Any] = value
elif weight_type == "running_mean":
__a : Union[str, Any] = value
elif weight_type == "running_var":
__a : Tuple = value
elif weight_type == "num_batches_tracked":
__a : Optional[int] = value
elif weight_type == "inv_freq":
__a : List[str] = value
else:
__a : List[str] = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _snake_case ( lowercase , lowercase , lowercase ) -> Dict:
__a : Dict = []
__a : Dict = fairseq_model.state_dict()
__a : Tuple = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
__a : int = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , )
__a : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
__a : Optional[int] = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__a : str = True
if "*" in mapped_key:
__a : Optional[int] = name.split(lowercase )[0].split(""".""" )[-2]
__a : List[Any] = mapped_key.replace("""*""" , lowercase )
if "pos_bias_u" in name:
__a : Union[str, Any] = None
elif "pos_bias_v" in name:
__a : List[Any] = None
elif "weight_g" in name:
__a : List[Any] = """weight_g"""
elif "weight_v" in name:
__a : List[Any] = """weight_v"""
elif "bias" in name:
__a : Optional[int] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__a : str = """weight"""
elif "running_mean" in name:
__a : List[str] = """running_mean"""
elif "inv_freq" in name:
__a : Dict = """inv_freq"""
elif "running_var" in name:
__a : Union[str, Any] = """running_var"""
elif "num_batches_tracked" in name:
__a : int = """num_batches_tracked"""
else:
__a : Optional[int] = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[str]:
__a : Optional[Any] = full_name.split("""conv_layers.""" )[-1]
__a : Union[str, Any] = name.split(""".""" )
__a : Optional[Any] = int(items[0] )
__a : int = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__a : Dict = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__a : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__a : Dict = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__a : Union[str, Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def _snake_case ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Optional[Any]:
if config_path is not None:
__a : Any = WavaVecaConformerConfig.from_pretrained(lowercase , hidden_act="""swish""" )
else:
__a : Optional[int] = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
__a : Optional[Any] = """rotary"""
if is_finetuned:
if dict_path:
__a : List[Any] = Dictionary.load(lowercase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__a : int = target_dict.pad_index
__a : List[str] = target_dict.bos_index
__a : str = target_dict.eos_index
__a : Dict = len(target_dict.symbols )
__a : Any = os.path.join(lowercase , """vocab.json""" )
if not os.path.isdir(lowercase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowercase ) )
return
os.makedirs(lowercase , exist_ok=lowercase )
__a : Dict = target_dict.indices
# fairseq has the <pad> and <s> switched
__a : Optional[Any] = 0
__a : List[Any] = 1
with open(lowercase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(lowercase , lowercase )
__a : int = WavaVecaCTCTokenizer(
lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowercase , )
__a : Optional[int] = True if config.feat_extract_norm == """layer""" else False
__a : Dict = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , )
__a : str = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase )
processor.save_pretrained(lowercase )
__a : List[str] = WavaVecaConformerForCTC(lowercase )
else:
__a : Optional[int] = WavaVecaConformerForPreTraining(lowercase )
if is_finetuned:
__a , __a , __a : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__a : Optional[int] = argparse.Namespace(task="""audio_pretraining""" )
__a : Tuple = fairseq.tasks.setup_task(lowercase )
__a , __a , __a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase )
__a : Any = model[0].eval()
recursively_load_weights(lowercase , lowercase , not is_finetuned )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
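# A hedged usage note: one plausible invocation for a fine-tuned fairseq Wav2Vec2-Conformer
# checkpoint. Filenames and paths are placeholders; pass --not_finetuned for a
# pretraining-only checkpoint (no CTC head, no --dict_path needed).
#
#   python convert_wav2vec2_conformer_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./checkpoint_best.pt \
#       --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer-hf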
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    result = model.predict(x_test)
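    # A hedged evaluation sketch: `result` and `y_test` are the prediction and target arrays
    # built above; the RMSE is computed in the scaled [0, 1] space, so it is only a rough
    # sanity check, not a price-level error.
    rmse = float(np.sqrt(np.mean((result - y_test) ** 2)))
    print(f"Test RMSE (scaled): {rmse:.4f}")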
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
_UpperCamelCase = None
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
_UpperCamelCase = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
},
}
_UpperCamelCase = {
'camembert-base': 512,
}
_UpperCamelCase = '▁'
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
"""simple docstring"""
__snake_case : List[str] = VOCAB_FILES_NAMES
__snake_case : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case : List[str] = ["""input_ids""", """attention_mask"""]
__snake_case : List[Any] = CamembertTokenizer
def __init__( self :List[Any] , __lowercase :Optional[int]=None , __lowercase :str=None , __lowercase :Optional[Any]="<s>" , __lowercase :List[str]="</s>" , __lowercase :Tuple="</s>" , __lowercase :int="<s>" , __lowercase :Union[str, Any]="<unk>" , __lowercase :Optional[int]="<pad>" , __lowercase :Union[str, Any]="<mask>" , __lowercase :Tuple=["<s>NOTUSED", "</s>NOTUSED"] , **__lowercase :List[str] , ):
# Mask token behave like a normal word, i.e. include the space before it
__lowerCamelCase : Optional[int] =AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
super().__init__(
__lowercase , tokenizer_file=__lowercase , bos_token=__lowercase , eos_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , unk_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , additional_special_tokens=__lowercase , **__lowercase , )
__lowerCamelCase : Any =vocab_file
__lowerCamelCase : Union[str, Any] =False if not self.vocab_file else True
def __lowercase ( self :List[str] , __lowercase :List[int] , __lowercase :Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCamelCase : Tuple =[self.cls_token_id]
__lowerCamelCase : str =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowercase ( self :Tuple , __lowercase :List[int] , __lowercase :Optional[List[int]] = None ):
__lowerCamelCase : Any =[self.sep_token_id]
__lowerCamelCase : str =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowercase ( self :List[Any] , __lowercase :str , __lowercase :Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__lowerCamelCase : List[str] =os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ):
copyfile(self.vocab_file , __lowercase )
return (out_vocab_file,)
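# A minimal usage sketch, assuming the class above matches transformers' CamembertTokenizerFast;
# the checkpoint name is the standard public one.
from transformers import CamembertTokenizerFast

tokenizer = CamembertTokenizerFast.from_pretrained("camembert-base")
encoded = tokenizer("J'aime le camembert !")
print(encoded["input_ids"])
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))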
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def _UpperCamelCase ( UpperCamelCase_ : SplitDict ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ = split_dict._to_yaml_list()
assert len(UpperCamelCase_ ) == len(UpperCamelCase_ )
lowerCAmelCase__ = SplitDict._from_yaml_list(UpperCamelCase_ )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
lowerCAmelCase__ = None
# the split name of split_dict takes over the name of the split info object
lowerCAmelCase__ = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'split_info' , [SplitInfo(), SplitInfo(dataset_name=UpperCamelCase_ ), SplitInfo(dataset_name='my_dataset' )] )
def _UpperCamelCase ( UpperCamelCase_ : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ = asdict(SplitDict({'train': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
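# A hedged illustration of the round trip the first test exercises: a SplitDict survives
# serialization to the YAML list form and back, keeping the per-split example counts.
from datasets.splits import SplitDict, SplitInfo

splits = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
reloaded = SplitDict._from_yaml_list(splits._to_yaml_list())
assert reloaded["train"].num_examples == 42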
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
__snake_case : int = logging.get_logger("""transformers.models.encodec""")
__snake_case : Tuple = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
__snake_case : List[Any] = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
__snake_case : str = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
__snake_case : str = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
__snake_case : Any = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
__snake_case : int = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
__snake_case : int = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
__snake_case : Union[str, Any] = []
__snake_case : Tuple = []
def _UpperCamelCase ( UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] ) -> List[str]:
"""simple docstring"""
for attribute in key.split('.' ):
lowerCAmelCase__ = getattr(UpperCamelCase_ , UpperCamelCase_ )
if weight_type is not None:
lowerCAmelCase__ = getattr(UpperCamelCase_ , UpperCamelCase_ ).shape
else:
lowerCAmelCase__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}" )
if weight_type == "weight":
lowerCAmelCase__ = value
elif weight_type == "weight_g":
lowerCAmelCase__ = value
elif weight_type == "weight_v":
lowerCAmelCase__ = value
elif weight_type == "bias":
lowerCAmelCase__ = value
elif weight_type == "running_mean":
lowerCAmelCase__ = value
elif weight_type == "running_var":
lowerCAmelCase__ = value
elif weight_type == "num_batches_tracked":
lowerCAmelCase__ = value
elif weight_type == "weight_ih_l0":
lowerCAmelCase__ = value
elif weight_type == "weight_hh_l0":
lowerCAmelCase__ = value
elif weight_type == "bias_ih_l0":
lowerCAmelCase__ = value
elif weight_type == "bias_hh_l0":
lowerCAmelCase__ = value
elif weight_type == "weight_ih_l1":
lowerCAmelCase__ = value
elif weight_type == "weight_hh_l1":
lowerCAmelCase__ = value
elif weight_type == "bias_ih_l1":
lowerCAmelCase__ = value
elif weight_type == "bias_hh_l1":
lowerCAmelCase__ = value
else:
lowerCAmelCase__ = value
logger.info(F"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." )
def _UpperCamelCase ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str] ) -> Tuple:
"""simple docstring"""
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
lowerCAmelCase__ , lowerCAmelCase__ = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def _UpperCamelCase ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : str ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ = []
if model_name == "encodec_24khz" or "encodec_32khz":
lowerCAmelCase__ = MAPPING_24K
elif model_name == "encodec_48khz":
lowerCAmelCase__ = MAPPING_48K
else:
raise ValueError(F"Unsupported model: {model_name}" )
for name, value in orig_dict.items():
if should_ignore(UpperCamelCase_ , UpperCamelCase_ ):
logger.info(F"{name} was ignored" )
continue
lowerCAmelCase__ = False
for key, mapped_key in MAPPING.items():
if "*" in key:
lowerCAmelCase__ , lowerCAmelCase__ = key.split('.*.' )
if prefix in name and suffix in name:
lowerCAmelCase__ = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('embed' ) and name.endswith('embed_avg' ):
continue
lowerCAmelCase__ = True
if "*" in mapped_key:
lowerCAmelCase__ = name.split(UpperCamelCase_ )[0].split('.' )[-2]
lowerCAmelCase__ = mapped_key.replace('*' , UpperCamelCase_ )
if "weight_g" in name:
lowerCAmelCase__ = 'weight_g'
elif "weight_v" in name:
lowerCAmelCase__ = 'weight_v'
elif "weight_ih_l0" in name:
lowerCAmelCase__ = 'weight_ih_l0'
elif "weight_hh_l0" in name:
lowerCAmelCase__ = 'weight_hh_l0'
elif "bias_ih_l0" in name:
lowerCAmelCase__ = 'bias_ih_l0'
elif "bias_hh_l0" in name:
lowerCAmelCase__ = 'bias_hh_l0'
elif "weight_ih_l1" in name:
lowerCAmelCase__ = 'weight_ih_l1'
elif "weight_hh_l1" in name:
lowerCAmelCase__ = 'weight_hh_l1'
elif "bias_ih_l1" in name:
lowerCAmelCase__ = 'bias_ih_l1'
elif "bias_hh_l1" in name:
lowerCAmelCase__ = 'bias_hh_l1'
elif "bias" in name:
lowerCAmelCase__ = 'bias'
elif "weight" in name:
lowerCAmelCase__ = 'weight'
elif "running_mean" in name:
lowerCAmelCase__ = 'running_mean'
elif "running_var" in name:
lowerCAmelCase__ = 'running_var'
elif "num_batches_tracked" in name:
lowerCAmelCase__ = 'num_batches_tracked'
else:
lowerCAmelCase__ = None
set_recursively(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
continue
if not is_used:
unused_weights.append(UpperCamelCase_ )
logger.warning(F"Unused weights: {unused_weights}" )
@torch.no_grad()
def _UpperCamelCase ( UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : List[Any]=None , ) -> Dict:
"""simple docstring"""
if config_path is not None:
lowerCAmelCase__ = EncodecConfig.from_pretrained(UpperCamelCase_ )
else:
lowerCAmelCase__ = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
lowerCAmelCase__ = [8, 5, 4, 4]
lowerCAmelCase__ = [2.2]
lowerCAmelCase__ = 64
lowerCAmelCase__ = 3_2000
lowerCAmelCase__ = 2048
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
elif model_name == "encodec_48khz":
lowerCAmelCase__ = [8, 5, 4, 2]
lowerCAmelCase__ = [3.0, 6.0, 12.0, 24.0]
lowerCAmelCase__ = 4_8000
lowerCAmelCase__ = 2
lowerCAmelCase__ = False
lowerCAmelCase__ = 'time_group_norm'
lowerCAmelCase__ = True
lowerCAmelCase__ = 1.0
lowerCAmelCase__ = 0.01
else:
raise ValueError(F"Unknown model name: {model_name}" )
lowerCAmelCase__ = EncodecModel(UpperCamelCase_ )
lowerCAmelCase__ = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(UpperCamelCase_ )
lowerCAmelCase__ = torch.load(UpperCamelCase_ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
lowerCAmelCase__ = original_checkpoint['best_state']
recursively_load_weights(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
model.save_pretrained(UpperCamelCase_ )
if repo_id:
print('Pushing to the hub...' )
feature_extractor.push_to_hub(UpperCamelCase_ )
model.push_to_hub(UpperCamelCase_ )
if __name__ == "__main__":
__snake_case : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
__snake_case : Dict = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
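# A hedged usage note: converting the public 24 kHz EnCodec checkpoint (the filename matches
# the download listed in the comments at the top of this script); paths and the script
# filename are placeholders.
#
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz-hf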
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
a_ = {
'/attention/': '/0/SelfAttention/',
'/self_attention/': '/0/SelfAttention/',
'/encoder_decoder_attention/': '/1/EncDecAttention/',
'value': 'v',
'query': 'q',
'key': 'k',
'out': 'o',
'pre_self_attention_layer_norm': '0/layer_norm',
'pre_cross_attention_layer_norm': '1/layer_norm',
'pre_attention_layer_norm': '0/layer_norm', # previously 1, but seems wrong
'token_embedder': 'shared',
'encoder_norm': 'final_layer_norm',
'decoder_norm': 'final_layer_norm',
'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight',
'router/router_weights/w/': 'router/classifier/',
'roer/roer_weights/w/': 'router/classifier/',
'logits_dense': 'lm_head',
}
def __UpperCAmelCase ( __UpperCamelCase ):
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
__lowercase : List[Any] = list(s_dict.keys() )
for key in keys:
__lowercase : Optional[int] = R'''.*/layers_(\d+)'''
__lowercase : Tuple = key
if re.match(__UpperCamelCase , __UpperCamelCase ):
__lowercase : Optional[Any] = re.sub(R'''layers_(\d+)''' , R'''block/\1/layer''' , __UpperCamelCase )
__lowercase : str = R'''(encoder|decoder)\/'''
if re.match(__UpperCamelCase , __UpperCamelCase ):
__lowercase : Union[str, Any] = re.match(__UpperCamelCase , __UpperCamelCase ).groups()
if groups[0] == "encoder":
__lowercase : Dict = re.sub(R'''/mlp/''' , R'''/1/mlp/''' , __UpperCamelCase )
__lowercase : Union[str, Any] = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/1/layer_norm/''' , __UpperCamelCase )
elif groups[0] == "decoder":
__lowercase : Optional[Any] = re.sub(R'''/mlp/''' , R'''/2/mlp/''' , __UpperCamelCase )
__lowercase : Dict = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/2/layer_norm/''' , __UpperCamelCase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
__lowercase : Optional[Any] = new_key.replace(__UpperCamelCase , __UpperCamelCase )
print(f"""{key} -> {new_key}""" )
__lowercase : Union[str, Any] = s_dict.pop(__UpperCamelCase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
__lowercase : Optional[Any] = s_dict[
'''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
__lowercase : List[str] = s_dict[
'''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
__lowercase : List[str] = s_dict[key].shape[0]
__lowercase : str = s_dict[key]
for idx in range(__UpperCamelCase ):
__lowercase : str = expert_weihts[idx]
print(f"""{key} -> {key.replace("expert/" , "nested fstring" )}""" )
s_dict.pop(__UpperCamelCase )
return s_dict
a_ = {
'NUM_ENCODER_LAYERS': 'num_layers',
'NUM_DECODER_LAYERS': 'num_decoder_layers',
'NUM_HEADS': 'num_heads',
'HEAD_DIM': 'd_kv',
'EMBED_DIM': 'd_model',
'MLP_DIM': 'd_ff',
'NUM_SELECTED_EXPERTS': 'num_selected_experts',
'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers',
'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers',
'dense.MlpBlock.activations': 'feed_forward_proj',
}
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
# Convert a google style config to the hugging face fromat
import regex as re
with open(__UpperCamelCase , '''r''' ) as f:
__lowercase : Dict = f.read()
__lowercase : Tuple = re.findall(R'''(.*) = ([0-9.]*)''' , __UpperCamelCase )
__lowercase : Union[str, Any] = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
__lowercase : Tuple = float(__UpperCamelCase ) if '''.''' in value else int(__UpperCamelCase )
__lowercase : Any = re.findall(R'''(.*activations) = \(\'(.*)\',\)''' , __UpperCamelCase )[0]
__lowercase : Optional[int] = str(activation[1] )
__lowercase : int = num_experts
__lowercase : Optional[int] = SwitchTransformersConfig(**__UpperCamelCase )
return config
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase="./" , __UpperCamelCase=8 ):
# Initialise PyTorch model
print(f"""Loading flax weights from : {flax_checkpoint_path}""" )
    __lowercase : Optional[int] = checkpoints.load_t5x_checkpoint(__UpperCamelCase )
if gin_file is not None:
__lowercase : Union[str, Any] = convert_gin_to_config(__UpperCamelCase , __UpperCamelCase )
else:
__lowercase : List[Any] = SwitchTransformersConfig.from_pretrained(__UpperCamelCase )
__lowercase : Optional[Any] = SwitchTransformersForConditionalGeneration(__UpperCamelCase )
__lowercase : int = flax_params['''target''']
__lowercase : List[Any] = flatten_dict(__UpperCamelCase , sep='''/''' )
__lowercase : int = rename_keys(__UpperCamelCase )
__lowercase : Any = unflatten_dict(__UpperCamelCase , sep='''/''' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(__UpperCamelCase , __UpperCamelCase )
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
pt_model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'
' model architecture. If not provided, a `gin_file` has to be provided.'
),
)
parser.add_argument(
'--gin_file',
default=None,
type=str,
required=False,
help='Path to the gin config file. If not provided, a `config_file` has to be passed ',
)
parser.add_argument(
'--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.'
)
parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts')
a_ = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
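# A hedged usage note: converting a Switch Transformers T5X checkpoint with a gin config;
# paths and the expert count are placeholders.
#
#   python convert_switch_transformers_original_flax_checkpoint_to_pytorch.py \
#       --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --gin_file /path/to/model.gin \
#       --pytorch_dump_folder_path ./switch-base-8 \
#       --num_experts 8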
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase__ : Optional[int] = {
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Tuple = ['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Any = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 105
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_focalnet"""] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 447
|
from math import sqrt
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime, using the 6k +/- 1 optimisation."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the ``nth`` prime number (Project Euler problem 7)."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(f'''{solution() = }''')
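    # Hedged sanity checks (illustrative only; they assume the helpers above behave like the
    # standard Project Euler #7 solution):
    assert is_prime(13)
    assert solution(6) == 13  # the sixth prime is 13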
| 447
| 1
|
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
"""Assert""",
"""AssignVariableOp""",
"""EmptyTensorList""",
"""MergeV2Checkpoints""",
"""ReadVariableOp""",
"""ResourceGather""",
"""RestoreV2""",
"""SaveV2""",
"""ShardedFilename""",
"""StatefulPartitionedCall""",
"""StaticRegexFullMatch""",
"""VarHandleOp""",
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
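
# Hedged usage sketch: the checker is intended to be run from the repository root, e.g.
#   python utils/check_tf_ops.py --saved_model_path /path/to/saved_model.pb --opset 12 --strict
# (the script path above is a placeholder; only the flags defined below are real).
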
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 374
|
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class __SCREAMING_SNAKE_CASE( a_ ):
_UpperCAmelCase = "char"
_UpperCAmelCase = "bpe"
_UpperCAmelCase = "wp"
__UpperCamelCase : Optional[Any] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class __SCREAMING_SNAKE_CASE( a_ ):
_UpperCAmelCase = ["image_processor", "char_tokenizer"]
_UpperCAmelCase = "ViTImageProcessor"
_UpperCAmelCase = "MgpstrTokenizer"
def __init__( self: Optional[int] , UpperCamelCase: Dict=None , UpperCamelCase: Any=None , **UpperCamelCase: Any ) -> str:
snake_case__ = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCamelCase , )
snake_case__ = kwargs.pop('feature_extractor' )
snake_case__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
snake_case__ = tokenizer
snake_case__ = AutoTokenizer.from_pretrained('gpt2' )
snake_case__ = AutoTokenizer.from_pretrained('bert-base-uncased' )
super().__init__(UpperCamelCase , UpperCamelCase )
def __call__( self: str , UpperCamelCase: List[str]=None , UpperCamelCase: Any=None , UpperCamelCase: Optional[Any]=None , **UpperCamelCase: Optional[int] ) -> List[str]:
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
snake_case__ = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if text is not None:
snake_case__ = self.char_tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
snake_case__ = encodings['input_ids']
return inputs
def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: List[str] ) -> int:
snake_case__ , snake_case__ , snake_case__ = sequences
snake_case__ = char_preds.size(0 )
snake_case__ , snake_case__ = self._decode_helper(UpperCamelCase , 'char' )
snake_case__ , snake_case__ = self._decode_helper(UpperCamelCase , 'bpe' )
snake_case__ , snake_case__ = self._decode_helper(UpperCamelCase , 'wp' )
snake_case__ = []
snake_case__ = []
for i in range(UpperCamelCase ):
snake_case__ = [char_scores[i], bpe_scores[i], wp_scores[i]]
snake_case__ = [char_strs[i], bpe_strs[i], wp_strs[i]]
snake_case__ = scores.index(max(UpperCamelCase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
snake_case__ = {}
snake_case__ = final_strs
snake_case__ = final_scores
snake_case__ = char_strs
snake_case__ = bpe_strs
snake_case__ = wp_strs
return out
def lowerCAmelCase_ ( self: str , UpperCamelCase: str , UpperCamelCase: Tuple ) -> Optional[int]:
if format == DecodeType.CHARACTER:
snake_case__ = self.char_decode
snake_case__ = 1
snake_case__ = '[s]'
elif format == DecodeType.BPE:
snake_case__ = self.bpe_decode
snake_case__ = 2
snake_case__ = '#'
elif format == DecodeType.WORDPIECE:
snake_case__ = self.wp_decode
snake_case__ = 1_02
snake_case__ = '[SEP]'
else:
raise ValueError(F'''Format {format} is not supported.''' )
snake_case__ , snake_case__ = [], []
snake_case__ = pred_logits.size(0 )
snake_case__ = pred_logits.size(1 )
snake_case__ , snake_case__ = pred_logits.topk(1 , dim=-1 , largest=UpperCamelCase , sorted=UpperCamelCase )
snake_case__ = preds_index.view(-1 , UpperCamelCase )[:, 1:]
snake_case__ = decoder(UpperCamelCase )
snake_case__ , snake_case__ = torch.nn.functional.softmax(UpperCamelCase , dim=2 ).max(dim=2 )
snake_case__ = preds_max_prob[:, 1:]
for index in range(UpperCamelCase ):
snake_case__ = preds_str[index].find(UpperCamelCase )
snake_case__ = preds_str[index][:pred_eos]
snake_case__ = preds_index[index].cpu().tolist()
snake_case__ = pred_index.index(UpperCamelCase ) if eos_token in pred_index else -1
snake_case__ = preds_max_prob[index][: pred_eos_index + 1]
snake_case__ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(UpperCamelCase )
conf_scores.append(UpperCamelCase )
return dec_strs, conf_scores
def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: str ) -> int:
snake_case__ = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(UpperCamelCase )]
return decode_strs
def lowerCAmelCase_ ( self: int , UpperCamelCase: Optional[int] ) -> Dict:
return self.bpe_tokenizer.batch_decode(UpperCamelCase )
def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: str ) -> Union[str, Any]:
snake_case__ = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(UpperCamelCase )]
return decode_strs
| 328
| 0
|
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule from an alpha_bar function (cosine or exponential)."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class snake_case ( _a , _a ):
@register_to_config
def __init__( self : int , A : Dict = 1_0_0_0 , A : Any = "fixed_small_log" , A : int = True , A : str = 1.0 , A : Union[str, Any] = "epsilon" , A : List[Any] = "squaredcos_cap_v2" , ):
'''simple docstring'''
if beta_schedule != "squaredcos_cap_v2":
raise ValueError('UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'' )
a : int = betas_for_alpha_bar(_A )
a : Optional[Any] = 1.0 - self.betas
a : str = torch.cumprod(self.alphas , dim=0 )
a : int = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
a : Any = 1.0
# setable values
a : Tuple = None
a : int = torch.from_numpy(np.arange(0 , _A )[::-1].copy() )
a : int = variance_type
def lowerCamelCase__ ( self : Union[str, Any] , A : List[Any] , A : Optional[Any] = None ):
'''simple docstring'''
return sample
def lowerCamelCase__ ( self : str , A : Tuple , A : Any = None ):
'''simple docstring'''
a : Tuple = num_inference_steps
a : Tuple = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
a : Tuple = (np.arange(0 , _A ) * step_ratio).round()[::-1].copy().astype(np.intaa )
a : List[Any] = torch.from_numpy(_A ).to(_A )
def lowerCamelCase__ ( self : Union[str, Any] , A : str , A : int=None , A : Any=None , A : str=None ):
'''simple docstring'''
if prev_timestep is None:
a : Union[str, Any] = t - 1
a : Union[str, Any] = self.alphas_cumprod[t]
a : int = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
a : List[str] = 1 - alpha_prod_t
a : List[str] = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
a : List[Any] = self.betas[t]
else:
a : int = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
a : str = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
a : Union[str, Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
a : List[Any] = torch.log(torch.clamp(_A , min=1E-20 ) )
a : Optional[Any] = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
a : int = variance.log()
a : str = beta.log()
a : str = (predicted_variance + 1) / 2
a : Dict = frac * max_log + (1 - frac) * min_log
return variance
def lowerCamelCase__ ( self : List[str] , A : List[str] , A : List[Any] , A : List[Any] , A : int = None , A : List[str]=None , A : Union[str, Any] = True , ):
'''simple docstring'''
a : Optional[Any] = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
a, a : List[str] = torch.split(_A , sample.shape[1] , dim=1 )
else:
a : Optional[Any] = None
# 1. compute alphas, betas
if prev_timestep is None:
a : Dict = t - 1
a : int = self.alphas_cumprod[t]
a : Tuple = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
a : Optional[Any] = 1 - alpha_prod_t
a : int = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
a : str = self.betas[t]
a : Tuple = self.alphas[t]
else:
a : Tuple = 1 - alpha_prod_t / alpha_prod_t_prev
a : Optional[int] = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
a : Any = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
a : Union[str, Any] = model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
' for the UnCLIPScheduler.' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
a : Optional[int] = torch.clamp(
_A , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
a : List[Any] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
a : Optional[Any] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
a : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
a : Dict = 0
if t > 0:
a : Optional[int] = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=_A , device=model_output.device )
a : int = self._get_variance(
_A , predicted_variance=_A , prev_timestep=_A , )
if self.variance_type == "fixed_small_log":
a : Dict = variance
elif self.variance_type == "learned_range":
a : str = (0.5 * variance).exp()
else:
raise ValueError(
F'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
' for the UnCLIPScheduler.' )
a : Optional[int] = variance * variance_noise
a : Tuple = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=_A , pred_original_sample=_A )
def lowerCamelCase__ ( self : Union[str, Any] , A : Tuple , A : Tuple , A : str , ):
'''simple docstring'''
a : List[str] = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
a : Union[str, Any] = timesteps.to(original_samples.device )
a : List[str] = alphas_cumprod[timesteps] ** 0.5
a : Any = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
a : int = sqrt_alpha_prod.unsqueeze(-1 )
a : List[str] = (1 - alphas_cumprod[timesteps]) ** 0.5
a : List[Any] = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
a : Optional[Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
a : Tuple = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
| 714
|
"""simple docstring"""
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law: given exactly two of voltage, current and resistance
    (the unknown quantity passed as 0), return the missing quantity."""
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if resistance < 0:
raise ValueError('Resistance cannot be negative' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('Exactly one argument must be 0' )
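
# Hedged usage sketch (illustrative; it assumes the helper above returns the missing
# quantity as a single-entry dict):
# >>> ohms_law(voltage=10, current=0, resistance=5)
# {'current': 2.0}
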
if __name__ == "__main__":
import doctest
doctest.testmod()
| 118
| 0
|
'''simple docstring'''
def lucas_lehmer_test(p: int) -> bool:
    """Return True if the Mersenne number 2**p - 1 is prime (p == 2 handled as a special case)."""
    if p < 2:
        raise ValueError("""p should not be less than 2!""")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(1_1))
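    # Worked example (hedged): for p = 7, m = 2**7 - 1 = 127 and the residue sequence
    # 4, 14, 67, 42, 111, 0 reaches 0 after p - 2 = 5 squarings, so 127 is a Mersenne prime
    # and lucas_lehmer_test(7) is True; lucas_lehmer_test(11) is False since
    # 2**11 - 1 = 2047 = 23 * 89.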
| 667
|
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
lowerCamelCase :int = logging.getLogger(__name__)
lowerCamelCase :List[Any] = 5_0 # max width of layer names
lowerCamelCase :List[Any] = 7_0 # max width of quantizer names
def add_arguments(parser):
    """Add quant_trainer's command line arguments to ``parser``."""
    group = parser.add_argument_group("""quant_trainer arguments""")
group.add_argument("""--wprec""" , type=lowerCamelCase__ , default=8 , help="""weight precision""" )
group.add_argument("""--aprec""" , type=lowerCamelCase__ , default=8 , help="""activation precision""" )
group.add_argument("""--quant-per-tensor""" , action="""store_true""" , help="""per tensor weight scaling""" )
group.add_argument("""--quant-disable""" , action="""store_true""" , help="""disable all quantizers""" )
group.add_argument("""--quant-disable-embeddings""" , action="""store_true""" , help="""disable all embeddings quantizers""" )
group.add_argument("""--quant-disable-keyword""" , type=lowerCamelCase__ , nargs="""+""" , help="""disable quantizers by keyword""" )
group.add_argument("""--quant-disable-layer-module""" , type=lowerCamelCase__ , help="""disable quantizers by keyword under layer.""" )
group.add_argument("""--quant-enable-layer-module""" , type=lowerCamelCase__ , help="""enable quantizers by keyword under layer""" )
group.add_argument("""--calibrator""" , default="""max""" , help="""which quantization range calibrator to use""" )
group.add_argument("""--percentile""" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="""percentile for PercentileCalibrator""" )
group.add_argument("""--fuse-qkv""" , action="""store_true""" , help="""use the same scale factor for qkv""" )
group.add_argument("""--clip-gelu""" , metavar="""N""" , type=lowerCamelCase__ , help="""clip gelu output maximum value to N""" )
group.add_argument(
"""--recalibrate-weights""" , action="""store_true""" , help=(
"""recalibrate weight amaxes by taking the max of the weights."""
""" amaxes will be computed with the current quantization granularity (axis)."""
) , )
def a ( lowerCamelCase__ ):
'''simple docstring'''
if args.calibrator == "max":
A_ : Union[str, Any] = """max"""
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("""Specify --percentile when using percentile calibrator""" )
A_ : int = """histogram"""
elif args.calibrator == "mse":
A_ : Dict = """histogram"""
else:
raise ValueError(f'Invalid calibrator {args.calibrator}' )
A_ : int = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCamelCase__ )
A_ : Optional[Any] = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(lowerCamelCase__ )
quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=False ):
'''simple docstring'''
logger.info("""Configuring Model for Quantization""" )
logger.info(f'using quantization package {pytorch_quantization.__file__}' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(lowerCamelCase__ , ["""embeddings"""] , which="""weight""" , _disabled=lowerCamelCase__ )
if args.quant_disable:
set_quantizer_by_name(lowerCamelCase__ , [""""""] , _disabled=lowerCamelCase__ )
if args.quant_disable_keyword:
set_quantizer_by_name(lowerCamelCase__ , args.quant_disable_keyword , _disabled=lowerCamelCase__ )
if args.quant_disable_layer_module:
set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_disable_layer_module] , _disabled=lowerCamelCase__ )
if args.quant_enable_layer_module:
set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_enable_layer_module] , _disabled=lowerCamelCase__ )
if args.recalibrate_weights:
recalibrate_weights(lowerCamelCase__ )
if args.fuse_qkv:
fuse_qkv(lowerCamelCase__ , lowerCamelCase__ )
if args.clip_gelu:
clip_gelu(lowerCamelCase__ , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
logger.info("""Enabling Calibration""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f'{name:80}: {module}' )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
logger.info("""Loading calibrated amax""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("""percentile""" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
def fusea(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
for mod in [qq, qk, qv]:
if not hasattr(lowerCamelCase__ , """_amax""" ):
print(""" WARNING: NO AMAX BUFFER""" )
return
A_ : List[Any] = qq._amax.detach().item()
A_ : Optional[int] = qk._amax.detach().item()
A_ : Dict = qv._amax.detach().item()
A_ : Any = max(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
qq._amax.fill_(lowerCamelCase__ )
qk._amax.fill_(lowerCamelCase__ )
qv._amax.fill_(lowerCamelCase__ )
logger.info(f' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}' )
for name, mod in model.named_modules():
if name.endswith(""".attention.self""" ):
logger.info(f'FUSE_QKV: {name:{name_width}}' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if name.endswith(""".output.dense""" ) and not name.endswith("""attention.output.dense""" ):
A_ : Optional[int] = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=lowerCamelCase__ )
A_ : Dict = mod._input_quantizer._amax.data.detach().item()
logger.info(f'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_weight_quantizer""" ) and mod._weight_quantizer.axis is not None:
A_ : Tuple = mod.weight.shape[0]
A_ : Dict = mod._weight_quantizer._amax.detach()
A_ : List[Any] = torch.ones(lowerCamelCase__ , dtype=amax.dtype , device=amax.device ) * amax
print(f'expanding {name} {amax} -> {mod._weight_quantizer._amax}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_weight_quantizer""" ):
if not hasattr(mod.weight_quantizer , """_amax""" ):
print("""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
A_ : Dict = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
A_ : Tuple = set(range(len(mod.weight.size() ) ) ) - axis_set
A_ : int = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCamelCase__ , keepdims=lowerCamelCase__ ).detach()
logger.info(f'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}' )
A_ : str = amax
def a ( lowerCamelCase__ , lowerCamelCase__=25 , lowerCamelCase__=1_80 , lowerCamelCase__=None ):
'''simple docstring'''
if ignore is None:
A_ : int = []
elif not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Union[str, Any] = [ignore]
A_ : Optional[Any] = 0
for name, mod in model.named_modules():
if not hasattr(lowerCamelCase__ , """weight""" ):
continue
A_ : List[str] = max(lowerCamelCase__ , len(lowerCamelCase__ ) )
for name, mod in model.named_modules():
A_ : Tuple = getattr(lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ )
A_ : List[Any] = getattr(lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ )
if not hasattr(lowerCamelCase__ , """weight""" ):
continue
if type(lowerCamelCase__ ) in ignore:
continue
if [True for s in ignore if type(lowerCamelCase__ ) is str and s in name]:
continue
A_ : Optional[int] = f'Act:{input_q.extra_repr()}'
A_ : Dict = f'Wgt:{weight_q.extra_repr()}'
A_ : List[Any] = f'{name:{name_width}} {act_str} {wgt_str}'
if len(lowerCamelCase__ ) <= line_width:
logger.info(lowerCamelCase__ )
else:
logger.info(f'{name:{name_width}} {act_str}' )
logger.info(f'{" ":{name_width}} {wgt_str}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : int = 0
for name, mod in model.named_modules():
if isinstance(lowerCamelCase__ , pytorch_quantization.nn.TensorQuantizer ):
print(f'{name:80} {mod}' )
count += 1
print(f'{count} TensorQuantizers found in model' )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if quantizer_mod is not None:
assert hasattr(lowerCamelCase__ , lowerCamelCase__ )
setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
else:
logger.warning(f'{name} has no {quantizer}' )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="both" , **lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = f'Warning: changing {which} quantizers of {name:{qname_width}}'
for k, v in kwargs.items():
s += f' {k}={v}'
if which in ["input", "both"]:
set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ , lowerCamelCase__ )
if which in ["weight", "both"]:
set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ , lowerCamelCase__ )
logger.info(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_input_quantizer""" ) or hasattr(lowerCamelCase__ , """_weight_quantizer""" ):
for n in names:
if re.search(lowerCamelCase__ , lowerCamelCase__ ):
set_quantizers(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
elif name.endswith("""_quantizer""" ):
for n in names:
if re.search(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Dict = f'Warning: changing {name:{name_width}}'
for k, v in kwargs.items():
s += f' {k}={v}'
setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
logger.info(lowerCamelCase__ )
| 667
| 1
|
import re
def split_input(str_: str) -> list:
    """Split the input on any character that is not a letter, digit or whitespace."""
    return [char.split() for char in re.split(R"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    """Join every word of the input with its first letter capitalised."""
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    """Join the words of the input with ``separator``, fully upper- or lower-cased."""
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ])
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ])
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    """Convert the input to PascalCase."""
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    """Convert the input to camelCase."""
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    """Convert the input to snake_case (or SNAKE_CASE when ``upper`` is True)."""
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    """Convert the input to kebab-case (or KEBAB-CASE when ``upper`` is True)."""
    return to_complex_case(text, upper, "-")
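
# Hedged usage sketch (results assume the helpers above behave like the usual
# string-case-conversion utilities):
# >>> to_camel_case("hello world")
# 'helloWorld'
# >>> to_snake_case("hello world", upper=False)
# 'hello_world'
# >>> to_kebab_case("hello world", upper=True)
# 'HELLO-WORLD'
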
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 115
|
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Rabin-Miller primality test with 5 random witnesses."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    """Cheap pre-check against small primes before falling back to Rabin-Miller."""
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1_024) -> int:
    """Return a random prime with roughly ``keysize`` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
| 115
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_gpt_bigcode': ['GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTBigCodeConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_gpt_bigcode'] = [
'GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTBigCodeForSequenceClassification',
'GPTBigCodeForTokenClassification',
'GPTBigCodeForCausalLM',
'GPTBigCodeModel',
'GPTBigCodePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(self, vocab_size=100_000, hidden_size=4_096, intermediate_size=11_008, num_hidden_layers=32, num_attention_heads=32, hidden_act="silu", max_position_embeddings=2_048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, use_memory_efficient_attention=True, hidden_dropout_prob=0.1, attention_dropout_prob=0.1, use_stable_embedding=True, shared_input_output_embedding=True, rope_scaling=None, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention)
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
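
# Hedged usage sketch (values are illustrative, not tied to any released checkpoint): the
# validation above accepts e.g.
#   OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
# and raises a ValueError for unknown scaling types or factors <= 1.
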
| 324
| 0
|
import numpy
class TwoHiddenLayerNeuralNetwork:
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a_ = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
a_ = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
a_ = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
a_ = numpy.random.rand(3 , 1 )
# Real output values provided.
a_ = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
a_ = numpy.zeros(output_array.shape )
def __magic_name__ ( self ):
a_ = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
a_ = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
a_ = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def __magic_name__ ( self ):
a_ = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
a_ = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
a_ = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for iteration in range(1 , iterations + 1 ):
a_ = self.feedforward()
self.back_propagation()
if give_loss:
a_ = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f"""Iteration {iteration} Loss: {loss}""" )
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE ):
a_ = input_arr
a_ = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
a_ = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
a_ = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Logistic activation function."""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of the sigmoid, expressed in terms of the sigmoid output ``value``."""
    return (value) * (1 - (value))
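
# Hedged sanity note (illustrative): with the helpers above, sigmoid(0.0) == 0.5 and
# sigmoid_derivative(0.5) == 0.25, the usual logistic / derivative pair when the derivative
# is expressed in terms of the sigmoid output.
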
def example() -> int:
    """Build, train and query the two-hidden-layer network defined above."""
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
| 403
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    """Return ``x`` unchanged if it is already an iterable, otherwise duplicate it into a pair."""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class lowerCamelCase_ :
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
pass
def __magic_name__ ( self ):
pass
def __magic_name__ ( self ):
pass
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a_ = np.abs((a - b) ).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , f"""Difference between torch and flax is {diff} (>= {tol}).""" )
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
a_ = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a_ = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE )
a_ = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
a_ , a_ = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a_ = {"""vision_model""": vision_model, """text_model""": text_model}
a_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE )
a_ = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
a_ , a_ = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a_ = {"""vision_model""": vision_model, """text_model""": text_model}
a_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE )
a_ = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
a_ = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE )
a_ = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE )
a_ = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
a_ = after_output[0]
a_ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 )
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
a_ , a_ = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a_ = {"""vision_model""": vision_model, """text_model""": text_model}
a_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE )
a_ = model(
input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
a_ = output.vision_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
a_ = to_atuple(vision_model.config.image_size )
a_ = to_atuple(vision_model.config.patch_size )
a_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
a_ = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
a_ = output.text_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
pt_model.to(_SCREAMING_SNAKE_CASE )
pt_model.eval()
# prepare inputs
a_ = inputs_dict
a_ = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
a_ = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
a_ = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_SCREAMING_SNAKE_CASE )
a_ = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
a_ = fx_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_SCREAMING_SNAKE_CASE )
a_ = VisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_flax=_SCREAMING_SNAKE_CASE )
pt_model_loaded.to(_SCREAMING_SNAKE_CASE )
pt_model_loaded.eval()
with torch.no_grad():
a_ = pt_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output_loaded.numpy() , 4E-2 )
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a_ = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a_ = VisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE )
a_ = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE )
a_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _SCREAMING_SNAKE_CASE )
a_ = fx_state
self.check_pt_flax_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a_ = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a_ = VisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE )
a_ = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE )
a_ = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , fx_model.params )
self.check_pt_flax_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __magic_name__ ( self ):
a_ = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_SCREAMING_SNAKE_CASE )
def __magic_name__ ( self ):
a_ = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_SCREAMING_SNAKE_CASE )
def __magic_name__ ( self ):
a_ = self.prepare_config_and_inputs()
self.check_save_load(**_SCREAMING_SNAKE_CASE )
def __magic_name__ ( self ):
a_ = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_SCREAMING_SNAKE_CASE )
@is_pt_flax_cross_test
def __magic_name__ ( self ):
a_ = self.prepare_config_and_inputs()
a_ = config_inputs_dict.pop("""vision_config""" )
a_ = config_inputs_dict.pop("""text_config""" )
a_ = config_inputs_dict
self.check_equivalence_pt_to_flax(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.check_equivalence_flax_to_pt(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __magic_name__ ( self ):
a_ , a_ = self.get_pretrained_model_and_inputs()
a_ = model_a(**_SCREAMING_SNAKE_CASE )
a_ = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_SCREAMING_SNAKE_CASE )
a_ = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE )
a_ = model_a(**_SCREAMING_SNAKE_CASE )
a_ = after_outputs[0]
a_ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-5 )
@require_flax
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
def __magic_name__ ( self ):
a_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=_SCREAMING_SNAKE_CASE , text_from_pt=_SCREAMING_SNAKE_CASE , )
a_ = 13
a_ = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
a_ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
a_ = random_attention_mask([batch_size, 4] )
a_ = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a_ = FlaxViTModel(_SCREAMING_SNAKE_CASE )
a_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
return vision_model, text_model
def __magic_name__ ( self ):
a_ = FlaxViTModelTester(self )
a_ = FlaxBertModelTester(self )
a_ = vit_model_tester.prepare_config_and_inputs()
a_ = bert_model_tester.prepare_config_and_inputs()
a_ , a_ = vision_config_and_inputs
a_ , a_ , a_ , a_ = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
def __magic_name__ ( self ):
a_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=_SCREAMING_SNAKE_CASE , text_from_pt=_SCREAMING_SNAKE_CASE , )
a_ = 13
a_ = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
a_ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
a_ = random_attention_mask([batch_size, 4] )
a_ = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a_ = FlaxCLIPVisionModel(_SCREAMING_SNAKE_CASE )
a_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
return vision_model, text_model
def __magic_name__ ( self ):
a_ = FlaxCLIPVisionModelTester(self )
a_ = FlaxBertModelTester(self )
a_ = clip_model_tester.prepare_config_and_inputs()
a_ = bert_model_tester.prepare_config_and_inputs()
a_ , a_ = vision_config_and_inputs
a_ , a_ , a_ , a_ = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def __magic_name__ ( self ):
a_ = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 )
a_ = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
a_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
a_ = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors="""np""" )
a_ = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
a_ = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
| 403
| 1
|
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Fill the single <mask> token and return the top-k candidate completions with their probabilities.
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(values))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 46
|
'''simple docstring'''
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
__lowerCAmelCase = input('\nEnter an Infix Equation = ') # Input an Infix equation
__lowerCAmelCase = ''.join(Infix.split()) # Remove spaces from the input
print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')
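# A minimal illustrative sketch of the same stack-based conversion with descriptive names;
# it assumes single-character operands and does no error handling.
PRIORITY = {"^": 3, "*": 2, "/": 2, "%": 2, "+": 1, "-": 1}

def infix_to_postfix(infix):
    stack, postfix = [], []
    for token in infix:
        if token.isalnum():
            postfix.append(token)                     # operands go straight to the output
        elif token == "(":
            stack.append(token)
        elif token == ")":
            while stack[-1] != "(":
                postfix.append(stack.pop())           # pop until the matching "("
            stack.pop()                               # discard "("
        else:
            # Guarding on "(" avoids looking up parentheses in the priority table.
            while stack and stack[-1] != "(" and PRIORITY[token] <= PRIORITY[stack[-1]]:
                postfix.append(stack.pop())
            stack.append(token)
    while stack:
        postfix.append(stack.pop())
    return "".join(postfix)

def infix_to_prefix(infix):
    # Reverse the expression, swap parentheses, convert to postfix, reverse again.
    swapped = "".join(")" if c == "(" else "(" if c == ")" else c for c in reversed(infix))
    return infix_to_postfix(swapped)[::-1]

# print(infix_to_postfix("a+b*(c^d-e)"))   # abcd^e-*+
# print(infix_to_prefix("a+b*(c^d-e)"))    # +a*b-^cde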
| 466
| 0
|
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: str , lowerCAmelCase: Tuple , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Optional[Any] ) -> Union[str, Any]:
_UpperCAmelCase : List[str] = multiprocessing.Manager()
_UpperCAmelCase : Optional[Any] = manager.list()
_UpperCAmelCase : int = multiprocessing.Process(target=lowerCAmelCase , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append("timed out" )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Optional[int] , lowerCAmelCase: int , lowerCAmelCase: str ) -> Tuple:
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
_UpperCAmelCase : Dict = shutil.rmtree
_UpperCAmelCase : Tuple = os.rmdir
_UpperCAmelCase : int = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
_UpperCAmelCase : Union[str, Any] = {}
with swallow_io():
with time_limit(lowerCAmelCase ):
exec(lowerCAmelCase , lowerCAmelCase )
result.append("passed" )
except TimeoutException:
result.append("timed out" )
except BaseException as e:
result.append(F'failed: {e}' )
# Needed for cleaning up.
_UpperCAmelCase : str = rmtree
_UpperCAmelCase : int = rmdir
_UpperCAmelCase : List[str] = chdir
@contextlib.contextmanager
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: str ) -> List[str]:
def signal_handler(lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Union[str, Any] ):
raise TimeoutException("Timed out!" )
signal.setitimer(signal.ITIMER_REAL , lowerCAmelCase )
signal.signal(signal.SIGALRM , lowerCAmelCase )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def __SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
_UpperCAmelCase : List[str] = WriteOnlyStringIO()
with contextlib.redirect_stdout(lowerCAmelCase ):
with contextlib.redirect_stderr(lowerCAmelCase ):
with redirect_stdin(lowerCAmelCase ):
yield
@contextlib.contextmanager
def __SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
with tempfile.TemporaryDirectory() as dirname:
with chdir(lowerCAmelCase ):
yield dirname
class a ( UpperCAmelCase ):
pass
class a ( io.StringIO ):
def _UpperCAmelCase ( self , *A_ , **A_ ):
'''simple docstring'''
raise OSError
def _UpperCAmelCase ( self , *A_ , **A_ ):
'''simple docstring'''
raise OSError
def _UpperCAmelCase ( self , *A_ , **A_ ):
'''simple docstring'''
raise OSError
def _UpperCAmelCase ( self , *A_ , **A_ ):
'''simple docstring'''
return False
class a ( contextlib._RedirectStream ): # type: ignore
_lowercase = "stdin"
@contextlib.contextmanager
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Union[str, Any] ) -> int:
if root == ".":
yield
return
_UpperCAmelCase : Union[str, Any] = os.getcwd()
os.chdir(lowerCAmelCase )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(lowerCAmelCase )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: List[Any]=None ) -> Union[str, Any]:
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : Tuple = None
import os
_UpperCAmelCase : Optional[int] = "1"
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : int = None
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : str = None
_UpperCAmelCase : Optional[Any] = None
_UpperCAmelCase : str = None
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : Any = None
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Any = None
_UpperCAmelCase : str = None
_UpperCAmelCase : Optional[Any] = None
_UpperCAmelCase : Optional[Any] = None
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : Dict = None
_UpperCAmelCase : int = None
_UpperCAmelCase : Dict = None
_UpperCAmelCase : Optional[Any] = None
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : List[str] = None
import shutil
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : Any = None
_UpperCAmelCase : Optional[Any] = None
import subprocess
_UpperCAmelCase : Dict = None # type: ignore
_UpperCAmelCase : Optional[Any] = None
import sys
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : Optional[Any] = None
_UpperCAmelCase : str = None
_UpperCAmelCase : str = None
_UpperCAmelCase : Tuple = None
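# A minimal illustrative sketch of the core pattern above: run untrusted code in a child
# process and kill it if it exceeds a timeout. The helper names are ours and the
# sandboxing is intentionally minimal (no resource limits or I/O redirection).
import multiprocessing

def _run_program(code_str, result):
    try:
        exec(code_str, {})
        result.append("passed")
    except BaseException as exc:  # mirror the harness above, which catches everything
        result.append(f"failed: {exc}")

def run_with_timeout(code_str, timeout=3.0):
    manager = multiprocessing.Manager()
    result = manager.list()
    proc = multiprocessing.Process(target=_run_program, args=(code_str, result))
    proc.start()
    proc.join(timeout)
    if proc.is_alive():
        proc.kill()
    return result[0] if result else "timed out"

if __name__ == "__main__":
    print(run_with_timeout("assert sum(range(5)) == 10"))     # passed
    print(run_with_timeout("while True: pass", timeout=1.0))  # timed out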
| 717
|
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCREAMING_SNAKE_CASE_ = 'scheduler_config.json'
class a ( UpperCAmelCase ):
_lowercase = 1
_lowercase = 2
_lowercase = 3
_lowercase = 4
_lowercase = 5
@dataclass
class a ( UpperCAmelCase ):
_lowercase = 42
class a :
_lowercase = SCHEDULER_CONFIG_NAME
_lowercase = ["dtype"]
_lowercase = []
_lowercase = True
@classmethod
def _UpperCAmelCase ( cls , A_ = None , A_ = None , A_=False , **A_ , ):
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : str = cls.load_config(
pretrained_model_name_or_path=A_ , subfolder=A_ , return_unused_kwargs=A_ , **A_ , )
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = cls.from_config(A_ , return_unused_kwargs=A_ , **A_ )
if hasattr(A_ , "create_state" ) and getattr(A_ , "has_state" , A_ ):
_UpperCAmelCase : Union[str, Any] = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def _UpperCAmelCase ( self , A_ , A_ = False , **A_ ):
'''simple docstring'''
self.save_config(save_directory=A_ , push_to_hub=A_ , **A_ )
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
return self._get_compatibles()
@classmethod
def _UpperCAmelCase ( cls ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = list(set([cls.__name__] + cls._compatibles ) )
_UpperCAmelCase : Optional[Any] = importlib.import_module(__name__.split("." )[0] )
_UpperCAmelCase : Dict = [
getattr(A_ , A_ ) for c in compatible_classes_str if hasattr(A_ , A_ )
]
return compatible_classes
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: jnp.ndarray , lowerCAmelCase: Tuple[int] ) -> jnp.ndarray:
assert len(lowerCAmelCase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(lowerCAmelCase ) - x.ndim) ) , lowerCAmelCase )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: int , lowerCAmelCase: Tuple=0.999 , lowerCAmelCase: int=jnp.floataa ) -> jnp.ndarray:
def alpha_bar(lowerCAmelCase: Union[str, Any] ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
_UpperCAmelCase : str = []
for i in range(lowerCAmelCase ):
_UpperCAmelCase : Optional[int] = i / num_diffusion_timesteps
_UpperCAmelCase : str = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(lowerCAmelCase ) / alpha_bar(lowerCAmelCase ) , lowerCAmelCase ) )
return jnp.array(lowerCAmelCase , dtype=lowerCAmelCase )
@flax.struct.dataclass
class a :
_lowercase = 42
_lowercase = 42
_lowercase = 42
@classmethod
def _UpperCAmelCase ( cls , A_ ):
'''simple docstring'''
_UpperCAmelCase : Tuple = scheduler.config
if config.trained_betas is not None:
_UpperCAmelCase : List[Any] = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
_UpperCAmelCase : List[Any] = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_UpperCAmelCase : List[str] = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_UpperCAmelCase : str = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
f'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' )
_UpperCAmelCase : Optional[int] = 1.0 - betas
_UpperCAmelCase : int = jnp.cumprod(A_ , axis=0 )
return cls(
alphas=A_ , betas=A_ , alphas_cumprod=A_ , )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: CommonSchedulerState , lowerCAmelCase: jnp.ndarray , lowerCAmelCase: jnp.ndarray , lowerCAmelCase: jnp.ndarray ) -> Union[str, Any]:
_UpperCAmelCase : Optional[int] = state.alphas_cumprod
_UpperCAmelCase : Optional[Any] = alphas_cumprod[timesteps] ** 0.5
_UpperCAmelCase : str = sqrt_alpha_prod.flatten()
_UpperCAmelCase : List[Any] = broadcast_to_shape_from_left(lowerCAmelCase , original_samples.shape )
_UpperCAmelCase : Optional[int] = (1 - alphas_cumprod[timesteps]) ** 0.5
_UpperCAmelCase : List[Any] = sqrt_one_minus_alpha_prod.flatten()
_UpperCAmelCase : int = broadcast_to_shape_from_left(lowerCAmelCase , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: CommonSchedulerState , lowerCAmelCase: jnp.ndarray , lowerCAmelCase: jnp.ndarray , lowerCAmelCase: jnp.ndarray ) -> List[Any]:
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = get_sqrt_alpha_prod(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
_UpperCAmelCase : Any = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: CommonSchedulerState , lowerCAmelCase: jnp.ndarray , lowerCAmelCase: jnp.ndarray , lowerCAmelCase: jnp.ndarray ) -> Dict:
_UpperCAmelCase , _UpperCAmelCase : int = get_sqrt_alpha_prod(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
_UpperCAmelCase : Tuple = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
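# A minimal illustrative sketch, in plain NumPy, of the squaredcos_cap_v2 ("Glide cosine")
# schedule and the forward-noising rule x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
# used above; function names are ours.
import numpy as np

def cosine_betas(num_steps, max_beta=0.999):
    def alpha_bar(t):
        return np.cos((t + 0.008) / 1.008 * np.pi / 2) ** 2
    return np.array(
        [min(1.0 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta) for i in range(num_steps)]
    )

def add_noise(x0, noise, t, alphas_cumprod):
    sqrt_ab = np.sqrt(alphas_cumprod[t])
    sqrt_one_minus_ab = np.sqrt(1.0 - alphas_cumprod[t])
    return sqrt_ab * x0 + sqrt_one_minus_ab * noise

betas = cosine_betas(1000)
alphas_cumprod = np.cumprod(1.0 - betas)
x0 = np.random.randn(4, 8)
noisy = add_noise(x0, np.random.randn(4, 8), t=500, alphas_cumprod=alphas_cumprod)
print(betas.shape, noisy.shape)   # (1000,) (4, 8)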
| 467
| 0
|
import numpy as np
import qiskit
def lowerCAmelCase( __lowerCamelCase = 8 , __lowerCamelCase = None ):
__a = np.random.default_rng(seed=__lowerCamelCase )
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
__a = 6 * key_len
# Measurement basis for Alice's qubits.
__a = rng.integers(2 , size=__lowerCamelCase )
# The set of states Alice will prepare.
__a = rng.integers(2 , size=__lowerCamelCase )
# Measurement basis for Bob's qubits.
__a = rng.integers(2 , size=__lowerCamelCase )
# Quantum Circuit to simulate BB84
__a = qiskit.QuantumCircuit(__lowerCamelCase , name='BB84' )
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(__lowerCamelCase ):
if alice_state[index] == 1:
bbaa_circ.x(__lowerCamelCase )
if alice_basis[index] == 1:
bbaa_circ.h(__lowerCamelCase )
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(__lowerCamelCase ):
if bob_basis[index] == 1:
bbaa_circ.h(__lowerCamelCase )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
__a = qiskit.Aer.get_backend('aer_simulator' )
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
__a = qiskit.execute(__lowerCamelCase , __lowerCamelCase , shots=1 , seed_simulator=__lowerCamelCase )
# Returns the result of measurement.
__a = job.result().get_counts(__lowerCamelCase ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
__a = ''.join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
__a = gen_key[:key_len] if len(__lowerCamelCase ) >= key_len else gen_key.ljust(__lowerCamelCase , '0' )
return key
if __name__ == "__main__":
print(F'''The generated key is : {bbaa(8, seed=0)}''')
from doctest import testmod
testmod()
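# A minimal illustrative sketch of the BB84 sifting step without a quantum simulator:
# when Alice and Bob happen to pick the same basis, Bob's measurement reproduces Alice's
# bit, and only those positions are kept. Purely classical, names are ours.
import numpy as np

def bb84_sift(key_len=8, seed=None):
    rng = np.random.default_rng(seed)
    n = 6 * key_len                                    # oversample; only ~50% of bases match
    alice_bits = rng.integers(2, size=n)
    alice_basis = rng.integers(2, size=n)
    bob_basis = rng.integers(2, size=n)
    # Matching basis -> Bob reads Alice's bit; otherwise his outcome is uniformly random.
    bob_bits = np.where(alice_basis == bob_basis, alice_bits, rng.integers(2, size=n))
    sifted = "".join(str(b) for a, bb, b in zip(alice_basis, bob_basis, bob_bits) if a == bb)
    return sifted[:key_len].ljust(key_len, "0")

print(bb84_sift(8, seed=0))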
| 559
|
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
lowerCamelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--original_config_file""",
type=str,
required=True,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--image_size""",
default=512,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
def lowerCAmelCase( __lowerCamelCase ):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f'''could not parse string as bool {string}''' )
parser.add_argument(
"""--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool
)
parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int)
lowerCamelCase_ : str = parser.parse_args()
lowerCamelCase_ : str = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 559
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase = {'''configuration_beit''': ['''BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BeitConfig''', '''BeitOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ['''BeitFeatureExtractor''']
UpperCAmelCase = ['''BeitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 565
|
from math import factorial
UpperCAmelCase = {str(d): factorial(d) for d in range(10)}
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
return sum(DIGIT_FACTORIAL[d] for d in str(__SCREAMING_SNAKE_CASE ) )
def UpperCAmelCase_ ( ):
lowercase = 7 * factorial(9 ) + 1
return sum(i for i in range(3 , __SCREAMING_SNAKE_CASE ) if sum_of_digit_factorial(__SCREAMING_SNAKE_CASE ) == i )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 565
| 1
|
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
class snake_case_ ( _lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_: Optional[Any] = """linear"""
SCREAMING_SNAKE_CASE_: Dict = """cosine"""
SCREAMING_SNAKE_CASE_: Tuple = """cosine_with_restarts"""
SCREAMING_SNAKE_CASE_: Dict = """polynomial"""
SCREAMING_SNAKE_CASE_: Optional[int] = """constant"""
SCREAMING_SNAKE_CASE_: str = """constant_with_warmup"""
SCREAMING_SNAKE_CASE_: Optional[int] = """piecewise_constant"""
def __lowerCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ = -1 ):
return LambdaLR(lowerCAmelCase__ ,lambda lowerCAmelCase__ : 1 ,last_epoch=lowerCAmelCase__ )
def __lowerCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = -1 ):
def lr_lambda(lowerCAmelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase__ ) / float(max(1.0 ,lowerCAmelCase__ ) )
return 1.0
return LambdaLR(lowerCAmelCase__ ,lowerCAmelCase__ ,last_epoch=lowerCAmelCase__ )
def __lowerCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = -1 ):
A__ = {}
A__ = step_rules.split(',' )
for rule_str in rule_list[:-1]:
A__ , A__ = rule_str.split(':' )
A__ = int(lowerCAmelCase__ )
A__ = float(lowerCAmelCase__ )
A__ = value
A__ = float(rule_list[-1] )
def create_rules_function(lowerCAmelCase__ ,lowerCAmelCase__ ):
def rule_func(lowerCAmelCase__ ) -> float:
A__ = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(lowerCAmelCase__ ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
A__ = create_rules_function(lowerCAmelCase__ ,lowerCAmelCase__ )
return LambdaLR(lowerCAmelCase__ ,lowerCAmelCase__ ,last_epoch=lowerCAmelCase__ )
def __lowerCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=-1 ):
def lr_lambda(lowerCAmelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase__ ) / float(max(1 ,lowerCAmelCase__ ) )
return max(
0.0 ,float(num_training_steps - current_step ) / float(max(1 ,num_training_steps - num_warmup_steps ) ) )
return LambdaLR(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
def __lowerCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = 0.5 ,lowerCAmelCase__ = -1 ):
def lr_lambda(lowerCAmelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase__ ) / float(max(1 ,lowerCAmelCase__ ) )
A__ = float(current_step - num_warmup_steps ) / float(max(1 ,num_training_steps - num_warmup_steps ) )
return max(0.0 ,0.5 * (1.0 + math.cos(math.pi * float(lowerCAmelCase__ ) * 2.0 * progress )) )
return LambdaLR(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
def __lowerCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = 1 ,lowerCAmelCase__ = -1 ):
def lr_lambda(lowerCAmelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase__ ) / float(max(1 ,lowerCAmelCase__ ) )
A__ = float(current_step - num_warmup_steps ) / float(max(1 ,num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 ,0.5 * (1.0 + math.cos(math.pi * ((float(lowerCAmelCase__ ) * progress) % 1.0) )) )
return LambdaLR(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
def __lowerCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=1E-7 ,lowerCAmelCase__=1.0 ,lowerCAmelCase__=-1 ):
A__ = optimizer.defaults['lr']
if not (lr_init > lr_end):
raise ValueError(f'''lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})''' )
def lr_lambda(lowerCAmelCase__ ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase__ ) / float(max(1 ,lowerCAmelCase__ ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
A__ = lr_init - lr_end
A__ = num_training_steps - num_warmup_steps
A__ = 1 - (current_step - num_warmup_steps) / decay_steps
A__ = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __lowerCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = None ,lowerCAmelCase__ = None ,lowerCAmelCase__ = None ,lowerCAmelCase__ = 1 ,lowerCAmelCase__ = 1.0 ,lowerCAmelCase__ = -1 ,):
A__ = SchedulerType(lowerCAmelCase__ )
A__ = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(lowerCAmelCase__ ,last_epoch=lowerCAmelCase__ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(lowerCAmelCase__ ,step_rules=lowerCAmelCase__ ,last_epoch=lowerCAmelCase__ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f'''{name} requires `num_warmup_steps`, please provide that argument.''' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(lowerCAmelCase__ ,num_warmup_steps=lowerCAmelCase__ ,last_epoch=lowerCAmelCase__ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f'''{name} requires `num_training_steps`, please provide that argument.''' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
lowerCAmelCase__ ,num_warmup_steps=lowerCAmelCase__ ,num_training_steps=lowerCAmelCase__ ,num_cycles=lowerCAmelCase__ ,last_epoch=lowerCAmelCase__ ,)
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
lowerCAmelCase__ ,num_warmup_steps=lowerCAmelCase__ ,num_training_steps=lowerCAmelCase__ ,power=lowerCAmelCase__ ,last_epoch=lowerCAmelCase__ ,)
return schedule_func(
lowerCAmelCase__ ,num_warmup_steps=lowerCAmelCase__ ,num_training_steps=lowerCAmelCase__ ,last_epoch=lowerCAmelCase__ )
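# A small illustrative usage sketch of the warmup-then-linear-decay pattern these
# factories implement, wired up manually with LambdaLR on a dummy parameter; the step
# counts and learning rate below are arbitrary.
import torch
from torch.optim.lr_scheduler import LambdaLR

param = torch.nn.Parameter(torch.zeros(1))
optimizer = torch.optim.AdamW([param], lr=1e-3)
num_warmup_steps, num_training_steps = 10, 100

def lr_lambda(step):
    if step < num_warmup_steps:
        return step / max(1, num_warmup_steps)   # linear warmup
    return max(0.0, (num_training_steps - step) / max(1, num_training_steps - num_warmup_steps))

scheduler = LambdaLR(optimizer, lr_lambda)
for _ in range(num_training_steps):
    optimizer.step()
    scheduler.step()
print(scheduler.get_last_lr())   # [0.0] once training is finished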
| 260
|
"""simple docstring"""
class snake_case_ :
"""simple docstring"""
def __init__( self , __a ):
"""simple docstring"""
A__ = len(__a )
A__ = [0] * len_array
if len_array > 0:
A__ = array[0]
for i in range(1 , __a ):
A__ = self.prefix_sum[i - 1] + array[i]
def _UpperCAmelCase ( self , __a , __a ):
"""simple docstring"""
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def _UpperCAmelCase ( self , __a ):
"""simple docstring"""
A__ = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(__a )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
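# An illustrative restatement with descriptive names: O(n) preprocessing, O(1) inclusive
# range sums, and hash-set detection of a contiguous subarray with a given sum.
class PrefixSum:
    def __init__(self, array):
        self.prefix = []
        running = 0
        for value in array:
            running += value
            self.prefix.append(running)

    def range_sum(self, start, end):
        if start == 0:
            return self.prefix[end]
        return self.prefix[end] - self.prefix[start - 1]

    def contains_sum(self, target):
        seen = {0}
        for s in self.prefix:
            if s - target in seen:     # some earlier prefix differs by exactly `target`
                return True
            seen.add(s)
        return False

ps = PrefixSum([1, 2, 3, 4])
print(ps.range_sum(1, 3))    # 9  (2 + 3 + 4)
print(ps.contains_sum(5))    # True (2 + 3)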
| 260
| 1
|
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def lowerCAmelCase_ ( UpperCamelCase__ : Callable , UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : float ):
"""simple docstring"""
__lowercase = int(np.ceil((x_end - xa) / step_size ) )
__lowercase = np.zeros((n + 1,) )
__lowercase = ya
__lowercase = xa
for k in range(UpperCamelCase__ ):
__lowercase = y[k] + step_size * ode_func(UpperCamelCase__ , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
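# An illustrative example: applying the same forward Euler stepper to y' = y with
# y(0) = 1 approximates exp(x); the helper name is ours.
import numpy as np

def explicit_euler(ode_func, y0, x0, step_size, x_end):
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros(n + 1)
    y[0], x = y0, x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])   # one explicit Euler step
        x += step_size
    return y

y = explicit_euler(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.01, x_end=1.0)
print(y[-1], np.exp(1))   # ~2.7048 vs 2.7183; the gap shrinks with smaller step_size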
| 718
|
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def lowerCAmelCase_ ( ):
"""simple docstring"""
__lowercase = argparse.ArgumentParser()
parser.add_argument("""--model_ckpt""" , type=UpperCamelCase__ , default="""microsoft/unixcoder-base-nine""" )
parser.add_argument("""--num_epochs""" , type=UpperCamelCase__ , default=5 )
parser.add_argument("""--batch_size""" , type=UpperCamelCase__ , default=6 )
parser.add_argument("""--gradient_accumulation_steps""" , type=UpperCamelCase__ , default=1 )
parser.add_argument("""--freeze""" , type=UpperCamelCase__ , default=UpperCamelCase__ )
parser.add_argument("""--learning_rate""" , type=UpperCamelCase__ , default=5E-4 )
parser.add_argument("""--seed""" , type=UpperCamelCase__ , default=0 )
parser.add_argument("""--lr_scheduler_type""" , type=UpperCamelCase__ , default="""cosine""" )
parser.add_argument("""--num_warmup_steps""" , type=UpperCamelCase__ , default=10 )
parser.add_argument("""--weight_decay""" , type=UpperCamelCase__ , default=0.01 )
parser.add_argument("""--output_dir""" , type=UpperCamelCase__ , default="""./results""" )
return parser.parse_args()
UpperCAmelCase__ =load("accuracy")
def lowerCAmelCase_ ( UpperCamelCase__ : List[Any] ):
"""simple docstring"""
__lowercase , __lowercase = eval_pred
__lowercase = np.argmax(UpperCamelCase__ , axis=1 )
return metric.compute(predictions=UpperCamelCase__ , references=UpperCamelCase__ )
class lowerCamelCase__ ( _a ):
def __init__( self : Any , A_ : int ):
'''simple docstring'''
super().__init__()
__lowercase = trainer
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , A_ : int , A_ : str , A_ : Any , **A_ : Optional[int] ):
'''simple docstring'''
if control.should_evaluate:
__lowercase = deepcopy(A_ )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="""train""" )
return control_copy
def lowerCAmelCase_ ( ):
"""simple docstring"""
__lowercase = get_args()
set_seed(args.seed )
__lowercase = load_dataset("""codeparrot/codecomplex""" , split="""train""" )
__lowercase = dataset.train_test_split(test_size=0.2 )
__lowercase = train_test["""test"""].train_test_split(test_size=0.5 )
__lowercase = DatasetDict(
{
"""train""": train_test["""train"""],
"""test""": test_validation["""train"""],
"""valid""": test_validation["""test"""],
} )
print("""Loading tokenizer and model""" )
__lowercase = AutoTokenizer.from_pretrained(args.model_ckpt )
__lowercase = tokenizer.eos_token
__lowercase = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
__lowercase = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
__lowercase = False
__lowercase = ClassLabel(num_classes=7 , names=list(set(train_test_validation["""train"""]["""complexity"""] ) ) )
def tokenize(UpperCamelCase__ : Optional[Any] ):
__lowercase = tokenizer(example["""src"""] , truncation=UpperCamelCase__ , max_length=1024 )
__lowercase = labels.str2int(example["""complexity"""] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
__lowercase = train_test_validation.map(
UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=train_test_validation["""train"""].column_names , )
__lowercase = DataCollatorWithPadding(tokenizer=UpperCamelCase__ )
__lowercase = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="""epoch""" , save_strategy="""epoch""" , logging_strategy="""epoch""" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model="""accuracy""" , run_name="""complexity-java""" , report_to="""wandb""" , )
__lowercase = Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=tokenized_datasets["""train"""] , eval_dataset=tokenized_datasets["""valid"""] , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , )
print("""Training...""" )
trainer.add_callback(CustomCallback(UpperCamelCase__ ) )
trainer.train()
if __name__ == "__main__":
main()
| 442
| 0
|
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
A_ : Optional[int] = random.Random()
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ) -> Dict:
'''simple docstring'''
if rng is None:
__UpperCAmelCase = global_rng
__UpperCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class A_ ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , lowercase__ , lowercase__=7 , lowercase__=400 , lowercase__=2_000 , lowercase__=1 , lowercase__=0.0 , lowercase__=16_000 , lowercase__=True , lowercase__=True , ) -> str:
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = min_seq_length
__UpperCAmelCase = max_seq_length
__UpperCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__UpperCAmelCase = feature_size
__UpperCAmelCase = padding_value
__UpperCAmelCase = sampling_rate
__UpperCAmelCase = return_attention_mask
__UpperCAmelCase = do_normalize
def lowerCAmelCase_ (self ) -> str:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCAmelCase_ (self , lowercase__=False , lowercase__=False ) -> str:
def _flatten(lowercase__ ):
return list(itertools.chain(*A_ ) )
if equal_length:
__UpperCAmelCase = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
__UpperCAmelCase = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__UpperCAmelCase = [np.asarray(A_ ) for x in speech_inputs]
return speech_inputs
class A_ ( _a , unittest.TestCase ):
'''simple docstring'''
a__ = WavaVecaFeatureExtractor
def lowerCAmelCase_ (self ) -> List[Any]:
__UpperCAmelCase = WavaVecaFeatureExtractionTester(self )
def lowerCAmelCase_ (self , lowercase__ ) -> int:
self.assertTrue(np.all(np.mean(A_ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(A_ , axis=0 ) - 1 ) < 1E-3 ) )
def lowerCAmelCase_ (self ) -> List[str]:
# Tests that all call wrap to encode_plus and batch_encode_plus
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__UpperCAmelCase = [np.asarray(A_ ) for speech_input in speech_inputs]
# Test not batched input
__UpperCAmelCase = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
__UpperCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test batched
__UpperCAmelCase = feat_extract(A_ , return_tensors='''np''' ).input_values
__UpperCAmelCase = feat_extract(A_ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__UpperCAmelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__UpperCAmelCase = np.asarray(A_ )
__UpperCAmelCase = feat_extract(A_ , return_tensors='''np''' ).input_values
__UpperCAmelCase = feat_extract(A_ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
def lowerCAmelCase_ (self ) -> str:
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__UpperCAmelCase = ['''longest''', '''max_length''', '''do_not_pad''']
__UpperCAmelCase = [None, 1_600, None]
for max_length, padding in zip(A_ , A_ ):
__UpperCAmelCase = feat_extract(A_ , padding=A_ , max_length=A_ , return_tensors='''np''' )
__UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self.assertTrue(input_values[0][1_000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def lowerCAmelCase_ (self ) -> List[Any]:
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCAmelCase = range(800 , 1_400 , 200 )
__UpperCAmelCase = [floats_list((1, x) )[0] for x in lengths]
__UpperCAmelCase = ['''longest''', '''max_length''', '''do_not_pad''']
__UpperCAmelCase = [None, 1_600, None]
for max_length, padding in zip(A_ , A_ ):
__UpperCAmelCase = feat_extract(A_ , max_length=A_ , padding=A_ )
__UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def lowerCAmelCase_ (self ) -> str:
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__UpperCAmelCase = feat_extract(
A_ , truncation=A_ , max_length=1_000 , padding='''max_length''' , return_tensors='''np''' )
__UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__UpperCAmelCase = feat_extract(
A_ , truncation=A_ , max_length=1_000 , padding='''longest''' , return_tensors='''np''' )
__UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_000) )
__UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__UpperCAmelCase = feat_extract(
A_ , truncation=A_ , max_length=2_000 , padding='''longest''' , return_tensors='''np''' )
__UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_200) )
@require_torch
def lowerCAmelCase_ (self ) -> Union[str, Any]:
import torch
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCAmelCase = np.random.rand(100 ).astype(np.floataa )
__UpperCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__UpperCAmelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
__UpperCAmelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def lowerCAmelCase_ (self ) -> Optional[int]:
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
__UpperCAmelCase = WavaVecaConfig.from_pretrained(A_ )
__UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(A_ )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == '''layer''' )
| 303
|
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__magic_name__ : Optional[Any] = 'http://www.mocksite.com/file1.txt'
__magic_name__ : Tuple = '"text": ["foo", "foo"]'
__magic_name__ : str = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'
class __snake_case :
__a = 200
__a = {'''Content-Length''': '''100'''}
__a = {}
def __a ( self: List[str] , **A_: List[Any] ):
return [bytes(A_ , """utf-8""" )]
def a_ ( *lowercase__ :List[Any], **lowercase__ :Any ):
return MockResponse()
@pytest.mark.parametrize("""urls_type""", [str, list, dict] )
def a_ ( lowercase__ :Optional[int], lowercase__ :Any, lowercase__ :Optional[int] ):
import requests
monkeypatch.setattr(lowercase__, """request""", lowercase__ )
__lowerCamelCase = URL
if issubclass(lowercase__, lowercase__ ):
__lowerCamelCase = url
elif issubclass(lowercase__, lowercase__ ):
__lowerCamelCase = [url]
elif issubclass(lowercase__, lowercase__ ):
__lowerCamelCase = {"""train""": url}
__lowerCamelCase = """dummy"""
__lowerCamelCase = """downloads"""
__lowerCamelCase = tmp_path
__lowerCamelCase = DownloadConfig(
cache_dir=os.path.join(lowercase__, lowercase__ ), use_etag=lowercase__, )
__lowerCamelCase = DownloadManager(dataset_name=lowercase__, download_config=lowercase__ )
__lowerCamelCase = dl_manager.download(lowercase__ )
__lowerCamelCase = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(lowercase__, lowercase__ ):
__lowerCamelCase = [downloaded_paths]
__lowerCamelCase = [urls]
elif isinstance(lowercase__, lowercase__ ):
assert "train" in downloaded_paths.keys()
__lowerCamelCase = downloaded_paths.values()
__lowerCamelCase = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(lowercase__, lowercase__ ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
__lowerCamelCase = Path(lowercase__ )
__lowerCamelCase = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
__lowerCamelCase = downloaded_path.read_text()
assert content == CONTENT
__lowerCamelCase = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
__lowerCamelCase = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""", [str, list, dict] )
def a_ ( lowercase__ :Dict, lowercase__ :Optional[Any], lowercase__ :Dict ):
__lowerCamelCase = str(lowercase__ )
if issubclass(lowercase__, lowercase__ ):
__lowerCamelCase = filename
elif issubclass(lowercase__, lowercase__ ):
__lowerCamelCase = [filename]
elif issubclass(lowercase__, lowercase__ ):
__lowerCamelCase = {"""train""": filename}
__lowerCamelCase = """dummy"""
__lowerCamelCase = xz_file.parent
__lowerCamelCase = """extracted"""
__lowerCamelCase = DownloadConfig(
cache_dir=lowercase__, use_etag=lowercase__, )
__lowerCamelCase = DownloadManager(dataset_name=lowercase__, download_config=lowercase__ )
__lowerCamelCase = dl_manager.extract(lowercase__ )
__lowerCamelCase = paths
for extracted_paths in [extracted_paths]:
if isinstance(lowercase__, lowercase__ ):
__lowerCamelCase = [extracted_paths]
__lowerCamelCase = [paths]
elif isinstance(lowercase__, lowercase__ ):
assert "train" in extracted_paths.keys()
__lowerCamelCase = extracted_paths.values()
__lowerCamelCase = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(lowercase__, lowercase__ ):
assert extracted_path == dl_manager.extracted_paths[input_path]
__lowerCamelCase = Path(lowercase__ )
__lowerCamelCase = extracted_path.parts
assert parts[-1] == hash_url_to_filename(lowercase__, etag=lowercase__ )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
__lowerCamelCase = extracted_path.read_text()
__lowerCamelCase = text_file.read_text()
assert extracted_file_content == expected_file_content
def a_ ( lowercase__ :List[str], lowercase__ :int ):
assert path.endswith(""".jsonl""" )
for num_items, line in enumerate(lowercase__, start=1 ):
__lowerCamelCase = json.loads(line.decode("""utf-8""" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""", ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def a_ ( lowercase__ :Optional[int], lowercase__ :Union[str, Any] ):
__lowerCamelCase = request.getfixturevalue(lowercase__ )
__lowerCamelCase = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(lowercase__ ), start=1 ):
_test_jsonl(lowercase__, lowercase__ )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""", ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def a_ ( lowercase__ :Optional[int], lowercase__ :List[Any] ):
__lowerCamelCase = request.getfixturevalue(lowercase__ )
__lowerCamelCase = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(lowercase__ ), start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(lowercase__ ), start=1 ):
_test_jsonl(lowercase__, lowercase__ )
assert num_tar == 1
assert num_jsonl == 2
def a_ ( lowercase__ :Tuple ):
__lowerCamelCase = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(lowercase__ ), start=1 ):
assert os.path.basename(lowercase__ ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 281
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
a_ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 715
|
'''simple docstring'''
def _a( UpperCamelCase__ : str, UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Tuple =[[False for _ in range(m + 1 )] for _ in range(n + 1 )]
SCREAMING_SNAKE_CASE__ : List[Any] =True
for i in range(UpperCamelCase__ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
SCREAMING_SNAKE_CASE__ : Optional[int] =True
if a[i].islower():
SCREAMING_SNAKE_CASE__ : List[Any] =True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
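# An illustrative restatement of the same DP with descriptive names: dp[i][j] is True when
# a[:i] can be turned into b[:j] by uppercasing some lowercase letters of a and deleting the rest.
def abbreviates(a, b):
    n, m = len(a), len(b)
    dp = [[False] * (m + 1) for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if not dp[i][j]:
                continue
            if j < m and a[i].upper() == b[j]:
                dp[i + 1][j + 1] = True    # use a[i] (possibly uppercased) to match b[j]
            if a[i].islower():
                dp[i + 1][j] = True        # delete a[i]; only lowercase letters may be dropped
    return dp[n][m]

print(abbreviates("daBcd", "ABC"))   # True
print(abbreviates("dBcd", "ABC"))    # False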
| 665
| 0
|
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 52
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : List[Any] ={'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] =['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : str =['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[str] =[
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : List[str] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 428
| 0
|
def UpperCamelCase_( __magic_name__ : str ):
"""simple docstring"""
return credit_card_number.startswith(('34', '35', '37', '4', '5', '6') )
def UpperCamelCase_( __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :Dict = credit_card_number
_lowerCAmelCase :str = 0
_lowerCAmelCase :str = len(__magic_name__ ) - 2
for i in range(__magic_name__ , -1 , -2 ):
# double the value of every second digit
_lowerCAmelCase :List[Any] = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
_lowerCAmelCase :List[str] = cc_number[:i] + str(__magic_name__ ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(__magic_name__ ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def UpperCamelCase_( __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :Dict = f"""{credit_card_number} is an invalid credit card number because"""
if not credit_card_number.isdigit():
print(f"""{error_message} it has nonnumerical characters.""" )
return False
if not 13 <= len(__magic_name__ ) <= 16:
print(f"""{error_message} of its length.""" )
return False
if not validate_initial_digits(__magic_name__ ):
print(f"""{error_message} of its first two digits.""" )
return False
if not luhn_validation(__magic_name__ ):
print(f"""{error_message} it fails the Luhn check.""" )
return False
print(f"""{credit_card_number} is a valid credit card number.""" )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
| 709
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a = {
"""configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""],
"""configuration_data2vec_text""": [
"""DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecTextConfig""",
"""Data2VecTextOnnxConfig""",
],
"""configuration_data2vec_vision""": [
"""DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecVisionConfig""",
"""Data2VecVisionOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecAudioForAudioFrameClassification""",
"""Data2VecAudioForCTC""",
"""Data2VecAudioForSequenceClassification""",
"""Data2VecAudioForXVector""",
"""Data2VecAudioModel""",
"""Data2VecAudioPreTrainedModel""",
]
a = [
"""DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecTextForCausalLM""",
"""Data2VecTextForMaskedLM""",
"""Data2VecTextForMultipleChoice""",
"""Data2VecTextForQuestionAnswering""",
"""Data2VecTextForSequenceClassification""",
"""Data2VecTextForTokenClassification""",
"""Data2VecTextModel""",
"""Data2VecTextPreTrainedModel""",
]
a = [
"""DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecVisionForImageClassification""",
"""Data2VecVisionForMaskedImageModeling""",
"""Data2VecVisionForSemanticSegmentation""",
"""Data2VecVisionModel""",
"""Data2VecVisionPreTrainedModel""",
]
if is_tf_available():
a = [
"""TFData2VecVisionForImageClassification""",
"""TFData2VecVisionForSemanticSegmentation""",
"""TFData2VecVisionModel""",
"""TFData2VecVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 382
| 0
|
import random
def _partition(data: list, pivot) -> tuple:
    """Three-way partition of ``data`` into values less than, equal to and greater than ``pivot``."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items: list, index: int):
    """Return the element that would be at position ``index`` if ``items`` were sorted."""
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    less, equal, greater = _partition(items, pivot)
    count = len(equal)
    m = len(less)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(less, index)
    # must be in larger
    else:
        return quick_select(greater, index - (m + count))
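# Usage sketch: quick_select returns the element that would sit at the given index
# if the list were sorted, e.g.
#   quick_select([2, 4, 5, 7, 899, 54, 32], 5)  # -> 54  (sorted: [2, 4, 5, 7, 32, 54, 899])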
| 668
|
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    def __init__(self, number_of_queues: int, time_slices: list[int], queue: deque[Process], current_time: int) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence
    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times
    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times
    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times
    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]
    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(self, ready_queue: deque[Process], time_slice: int):
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
    import doctest
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)
    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()
    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        F'''waiting time:\
        \t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}'''
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        F'''completion time:\
        \t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}'''
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        F'''turnaround time:\
        \t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}'''
    )
    # print sequence of finished processes
    print(
        F'''sequence of finished processes:\
        {mlfq.calculate_sequence_of_finish_queue()}'''
    )
| 668
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 425
|
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
lowerCAmelCase : Dict = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def _A ( A ,A ,A ,A ,A ) -> str:
for attribute in key.split("." ):
lowercase : Any = getattr(A ,A )
if weight_type is not None:
lowercase : Optional[Any] = getattr(A ,A ).shape
else:
lowercase : List[str] = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
lowercase : Any = value
elif weight_type == "weight_g":
lowercase : Optional[Any] = value
elif weight_type == "weight_v":
lowercase : Tuple = value
elif weight_type == "bias":
lowercase : int = value
else:
lowercase : int = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def _A ( A ,A ) -> int:
lowercase : List[Any] = []
lowercase : int = fairseq_model.state_dict()
lowercase : Optional[Any] = hf_model.feature_extractor
for name, value in fairseq_dict.items():
lowercase : List[str] = False
if "conv_layers" in name:
load_conv_layer(
A ,A ,A ,A ,hf_model.config.feat_extract_norm == "group" ,)
lowercase : Optional[int] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
lowercase : Union[str, Any] = True
if "*" in mapped_key:
lowercase : Dict = name.split(A )[0].split("." )[-2]
lowercase : Union[str, Any] = mapped_key.replace("*" ,A )
if "weight_g" in name:
lowercase : Union[str, Any] = "weight_g"
elif "weight_v" in name:
lowercase : Tuple = "weight_v"
elif "bias" in name and "relative_attention_bias" not in name:
lowercase : Union[str, Any] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase : Any = "weight"
else:
lowercase : Tuple = None
set_recursively(A ,A ,A ,A ,A )
continue
if not is_used:
unused_weights.append(A )
logger.warning(F'''Unused weights: {unused_weights}''' )
def _A ( A ,A ,A ,A ,A ) -> Any:
lowercase : Optional[int] = full_name.split("conv_layers." )[-1]
lowercase : Any = name.split("." )
lowercase : Dict = int(items[0] )
lowercase : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
lowercase : Tuple = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
lowercase : List[Any] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
lowercase : str = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
lowercase : Optional[Any] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(A )
@torch.no_grad()
def _A ( A ,A ,A=None ) -> Optional[Any]:
# load the pre-trained checkpoints
lowercase : Union[str, Any] = torch.load(A )
lowercase : List[Any] = WavLMConfigOrig(checkpoint["cfg"] )
lowercase : Tuple = WavLMOrig(A )
model.load_state_dict(checkpoint["model"] )
model.eval()
if config_path is not None:
lowercase : List[str] = WavLMConfig.from_pretrained(A )
else:
lowercase : Union[str, Any] = WavLMConfig()
lowercase : Optional[Any] = WavLMModel(A )
recursively_load_weights(A ,A )
hf_wavlm.save_pretrained(A )
if __name__ == "__main__":
lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
lowerCAmelCase : int = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
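    # Example invocation (hedged sketch -- the script name and paths below are
    # placeholders, not taken from this file):
    #   python convert_wavlm_original_checkpoint.py \
    #       --checkpoint_path /path/to/WavLM-Base.pt \
    #       --pytorch_dump_folder_path ./wavlm-base-converted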
| 425
| 1
|
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCAmelCase__ = False
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = '''ybelkada/fonts'''
def _check_torch_version( ):
'''simple docstring'''
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
'''Pix2StructImageProcessor. Please upgrade torch.''' )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Extract non-overlapping (patch_height, patch_width) patches from a (C, H, W) image tensor."""
    requires_backends(torch_extract_patches, ['''torch'''])
    _check_torch_version()
    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
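# Shape sketch (assuming a 3x224x224 input and 16x16 patches): unfold produces
# 14*14 patches of depth 3*16*16 = 768, so the returned tensor has shape
# [1, 14, 14, 768], i.e. [1, rows, columns, patch_height * patch_width * channels].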
def snake_case_ ( A_ : str, A_ : int = 36, A_ : str = "black", A_ : str = "white", A_ : int = 5, A_ : int = 5, A_ : int = 5, A_ : int = 5, A_ : Optional[bytes] = None, A_ : Optional[str] = None, ):
'''simple docstring'''
requires_backends(A_, '''vision''' )
# Add new lines so that each line is no more than 80 characters.
_lowerCamelCase : Optional[Any] = textwrap.TextWrapper(width=80 )
_lowerCamelCase : int = wrapper.wrap(text=A_ )
_lowerCamelCase : List[Any] = '''\n'''.join(A_ )
if font_bytes is not None and font_path is None:
_lowerCamelCase : Optional[int] = io.BytesIO(A_ )
elif font_path is not None:
_lowerCamelCase : Union[str, Any] = font_path
else:
_lowerCamelCase : str = hf_hub_download(A_, '''Arial.TTF''' )
_lowerCamelCase : Optional[Any] = ImageFont.truetype(A_, encoding='''UTF-8''', size=A_ )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
_lowerCamelCase : Union[str, Any] = ImageDraw.Draw(Image.new('''RGB''', (1, 1), A_ ) )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[Any] = temp_draw.textbbox((0, 0), A_, A_ )
# Create the actual image with a bit of padding around the text.
_lowerCamelCase : int = text_width + left_padding + right_padding
_lowerCamelCase : Dict = text_height + top_padding + bottom_padding
_lowerCamelCase : int = Image.new('''RGB''', (image_width, image_height), A_ )
_lowerCamelCase : Optional[Any] = ImageDraw.Draw(A_ )
draw.text(xy=(left_padding, top_padding), text=A_, fill=A_, font=A_ )
return image
def snake_case_ ( A_ : np.ndarray, A_ : str, **A_ : Optional[int] ):
'''simple docstring'''
requires_backends(A_, '''vision''' )
# Convert to PIL image if necessary
_lowerCamelCase : List[str] = to_pil_image(A_ )
_lowerCamelCase : Union[str, Any] = render_text(A_, **A_ )
_lowerCamelCase : int = max(header_image.width, image.width )
_lowerCamelCase : List[Any] = int(image.height * (new_width / image.width) )
_lowerCamelCase : Dict = int(header_image.height * (new_width / header_image.width) )
_lowerCamelCase : List[Any] = Image.new('''RGB''', (new_width, new_height + new_header_height), '''white''' )
new_image.paste(header_image.resize((new_width, new_header_height) ), (0, 0) )
new_image.paste(image.resize((new_width, new_height) ), (0, new_header_height) )
# Convert back to the original framework if necessary
_lowerCamelCase : Optional[int] = to_numpy_array(A_ )
if infer_channel_dimension_format(A_ ) == ChannelDimension.LAST:
_lowerCamelCase : int = to_channel_dimension_format(A_, ChannelDimension.LAST )
return new_image
class __snake_case ( BaseImageProcessor):
snake_case__ : str = ["flattened_patches"]
def __init__( self : str , __lowerCAmelCase : bool = True , __lowerCAmelCase : bool = True , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : int = 2_0_4_8 , __lowerCAmelCase : bool = False , **__lowerCAmelCase : Optional[int] , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = patch_size if patch_size is not None else {'''height''': 1_6, '''width''': 1_6}
_lowerCamelCase : List[str] = do_normalize
_lowerCamelCase : Dict = do_convert_rgb
_lowerCamelCase : Union[str, Any] = max_patches
_lowerCamelCase : Any = is_vqa
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : int , __lowerCAmelCase : dict , **__lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
requires_backends(self.extract_flattened_patches , '''torch''' )
_check_torch_version()
# convert to torch
_lowerCamelCase : Any = to_channel_dimension_format(__lowerCAmelCase , ChannelDimension.FIRST )
_lowerCamelCase : int = torch.from_numpy(__lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = patch_size['''height'''], patch_size['''width''']
_lowerCamelCase , _lowerCamelCase : Dict = get_image_size(__lowerCAmelCase )
# maximize scale s.t.
_lowerCamelCase : Any = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
_lowerCamelCase : str = max(min(math.floor(scale * image_height / patch_height ) , __lowerCAmelCase ) , 1 )
_lowerCamelCase : Optional[int] = max(min(math.floor(scale * image_width / patch_width ) , __lowerCAmelCase ) , 1 )
_lowerCamelCase : List[Any] = max(num_feasible_rows * patch_height , 1 )
_lowerCamelCase : str = max(num_feasible_cols * patch_width , 1 )
_lowerCamelCase : Optional[Any] = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode='''bilinear''' , align_corners=__lowerCAmelCase , antialias=__lowerCAmelCase , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
_lowerCamelCase : int = torch_extract_patches(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Optional[int] = patches.shape
_lowerCamelCase : int = patches_shape[1]
_lowerCamelCase : Union[str, Any] = patches_shape[2]
_lowerCamelCase : Optional[int] = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
_lowerCamelCase : List[Any] = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
_lowerCamelCase : List[Any] = torch.arange(__lowerCAmelCase ).reshape([rows, 1] ).repeat(1 , __lowerCAmelCase ).reshape([rows * columns, 1] )
_lowerCamelCase : Any = torch.arange(__lowerCAmelCase ).reshape([1, columns] ).repeat(__lowerCAmelCase , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
_lowerCamelCase : int = row_ids.to(torch.floataa )
_lowerCamelCase : str = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
_lowerCamelCase : Any = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
_lowerCamelCase : Any = torch.nn.functional.pad(__lowerCAmelCase , [0, 0, 0, max_patches - (rows * columns)] ).float()
_lowerCamelCase : Any = to_numpy_array(__lowerCAmelCase )
return result
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : Dict ):
"""simple docstring"""
if image.dtype == np.uinta:
_lowerCamelCase : Tuple = image.astype(np.floataa )
# take mean across the whole `image`
_lowerCamelCase : List[str] = np.mean(__lowerCAmelCase )
_lowerCamelCase : Dict = np.std(__lowerCAmelCase )
_lowerCamelCase : Tuple = max(__lowerCAmelCase , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : ImageInput , __lowerCAmelCase : Optional[str] = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[Dict[str, int]] = None , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : ChannelDimension = ChannelDimension.FIRST , **__lowerCAmelCase : Any , ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
_lowerCamelCase : Tuple = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_lowerCamelCase : str = patch_size if patch_size is not None else self.patch_size
_lowerCamelCase : List[Any] = max_patches if max_patches is not None else self.max_patches
_lowerCamelCase : Any = self.is_vqa
if kwargs.get('''data_format''' , __lowerCAmelCase ) is not None:
raise ValueError('''data_format is not an accepted input as the outputs are ''' )
_lowerCamelCase : Tuple = make_list_of_images(__lowerCAmelCase )
if not valid_images(__lowerCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_lowerCamelCase : Optional[Any] = [convert_to_rgb(__lowerCAmelCase ) for image in images]
# All transformations expect numpy arrays.
_lowerCamelCase : Optional[Any] = [to_numpy_array(__lowerCAmelCase ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('''A header text must be provided for VQA models.''' )
_lowerCamelCase : str = kwargs.pop('''font_bytes''' , __lowerCAmelCase )
_lowerCamelCase : int = kwargs.pop('''font_path''' , __lowerCAmelCase )
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_lowerCamelCase : Optional[Any] = [header_text] * len(__lowerCAmelCase )
_lowerCamelCase : Dict = [
render_header(__lowerCAmelCase , header_text[i] , font_bytes=__lowerCAmelCase , font_path=__lowerCAmelCase )
for i, image in enumerate(__lowerCAmelCase )
]
if do_normalize:
_lowerCamelCase : Dict = [self.normalize(image=__lowerCAmelCase ) for image in images]
# convert to torch tensor and permute
_lowerCamelCase : Optional[int] = [
self.extract_flattened_patches(image=__lowerCAmelCase , max_patches=__lowerCAmelCase , patch_size=__lowerCAmelCase )
for image in images
]
# create attention mask in numpy
_lowerCamelCase : Union[str, Any] = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
_lowerCamelCase : str = BatchFeature(
data={'''flattened_patches''': images, '''attention_mask''': attention_masks} , tensor_type=__lowerCAmelCase )
return encoded_outputs
| 83
|
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''', [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
], )
def snake_case_ ( A_ : Dict, A_ : List[str] ):
'''simple docstring'''
_lowerCamelCase : int = tmp_path_factory.mktemp('''dset_infos_dir''' )
if "full:README.md" in files:
with open(dataset_infos_dir / '''README.md''', '''w''' ) as f:
f.write('''---\ndataset_info:\n dataset_size: 42\n---''' )
if "empty:README.md" in files:
with open(dataset_infos_dir / '''README.md''', '''w''' ) as f:
f.write('''''' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / '''dataset_infos.json''', '''w''' ) as f:
f.write('''{"default": {"dataset_size": 42}}''' )
_lowerCamelCase : str = DatasetInfosDict.from_directory(A_ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'''dataset_info''', [
DatasetInfo(),
DatasetInfo(
description='''foo''', features=Features({'''a''': Value('''int32''' )} ), builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train'''}], download_size=42, ),
], )
def snake_case_ ( A_ : str, A_ : DatasetInfo ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = str(A_ )
dataset_info.write_to_directory(A_ )
_lowerCamelCase : str = DatasetInfo.from_directory(A_ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(A_, '''dataset_info.json''' ) )
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = DatasetInfo(
description='''foo''', citation='''bar''', homepage='''https://foo.bar''', license='''CC0''', features=Features({'''a''': Value('''int32''' )} ), post_processed={}, supervised_keys=(), task_templates=[], builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train''', '''num_examples''': 42}], download_checksums={}, download_size=13_37, post_processing_size=4_42, dataset_size=12_34, size_in_bytes=13_37 + 4_42 + 12_34, )
_lowerCamelCase : Optional[Any] = dataset_info._to_yaml_dict()
assert sorted(A_ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str) )
_lowerCamelCase : str = yaml.safe_dump(A_ )
_lowerCamelCase : Tuple = yaml.safe_load(A_ )
assert dataset_info_yaml_dict == reloaded
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : int = DatasetInfo()
_lowerCamelCase : Dict = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'''dataset_infos_dict''', [
DatasetInfosDict(),
DatasetInfosDict({'''default''': DatasetInfo()} ),
DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
DatasetInfosDict(
{
'''default''': DatasetInfo(
description='''foo''', features=Features({'''a''': Value('''int32''' )} ), builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train'''}], download_size=42, )
} ),
DatasetInfosDict(
{
'''v1''': DatasetInfo(dataset_size=42 ),
'''v2''': DatasetInfo(dataset_size=13_37 ),
} ),
], )
def snake_case_ ( A_ : Optional[Any], A_ : DatasetInfosDict ):
'''simple docstring'''
_lowerCamelCase : List[str] = str(A_ )
dataset_infos_dict.write_to_directory(A_ )
_lowerCamelCase : List[Any] = DatasetInfosDict.from_directory(A_ )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
_lowerCamelCase : str = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
_lowerCamelCase : Any = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(A_, '''README.md''' ) )
| 83
| 1
|
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
__A : Optional[int] = True
except (ImportError, AttributeError):
__A : Union[str, Any] = object
def UpperCAmelCase ( *lowerCamelCase_ :List[str] , **lowerCamelCase_ :Optional[int] ):
'''simple docstring'''
pass
__A : Any = False
__A : int = logging.get_logger('transformers-cli/serving')
def UpperCAmelCase ( lowerCamelCase_ :Tuple ):
'''simple docstring'''
snake_case_ : Optional[int] = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(__SCREAMING_SNAKE_CASE , args.host , args.port , args.workers )
class __UpperCamelCase ( lowercase__ ):
lowercase : dict
class __UpperCamelCase ( lowercase__ ):
lowercase : List[str]
lowercase : Optional[List[int]]
class __UpperCamelCase ( lowercase__ ):
lowercase : str
class __UpperCamelCase ( lowercase__ ):
lowercase : Any
class __UpperCamelCase ( lowercase__ ):
@staticmethod
def a__ ( _UpperCamelCase :int ):
snake_case_ : Optional[Any] = parser.add_parser(
"""serve""" ,help="""CLI tool to run inference requests through REST and GraphQL endpoints.""" )
serve_parser.add_argument(
"""--task""" ,type=_UpperCamelCase ,choices=get_supported_tasks() ,help="""The task to run the pipeline on""" ,)
serve_parser.add_argument("""--host""" ,type=_UpperCamelCase ,default="""localhost""" ,help="""Interface the server will listen on.""" )
serve_parser.add_argument("""--port""" ,type=_UpperCamelCase ,default=8_8_8_8 ,help="""Port the serving will listen to.""" )
serve_parser.add_argument("""--workers""" ,type=_UpperCamelCase ,default=1 ,help="""Number of http workers""" )
serve_parser.add_argument("""--model""" ,type=_UpperCamelCase ,help="""Model's name or path to stored model.""" )
serve_parser.add_argument("""--config""" ,type=_UpperCamelCase ,help="""Model's config name or path to stored model.""" )
serve_parser.add_argument("""--tokenizer""" ,type=_UpperCamelCase ,help="""Tokenizer name to use.""" )
serve_parser.add_argument(
"""--device""" ,type=_UpperCamelCase ,default=-1 ,help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" ,)
serve_parser.set_defaults(func=_UpperCamelCase )
def __init__( self :Optional[int] ,_UpperCamelCase :Tuple ,_UpperCamelCase :str ,_UpperCamelCase :Any ,_UpperCamelCase :Union[str, Any] ):
snake_case_ : Tuple = pipeline
snake_case_ : List[Any] = host
snake_case_ : Tuple = port
snake_case_ : Optional[int] = workers
if not _serve_dependencies_installed:
raise RuntimeError(
"""Using serve command requires FastAPI and uvicorn. """
"""Please install transformers with [serving]: pip install \"transformers[serving]\"."""
"""Or install FastAPI and uvicorn separately.""" )
else:
logger.info(F'''Serving model over {host}:{port}''' )
snake_case_ : Union[str, Any] = FastAPI(
routes=[
APIRoute(
"""/""" ,self.model_info ,response_model=_UpperCamelCase ,response_class=_UpperCamelCase ,methods=["""GET"""] ,),
APIRoute(
"""/tokenize""" ,self.tokenize ,response_model=_UpperCamelCase ,response_class=_UpperCamelCase ,methods=["""POST"""] ,),
APIRoute(
"""/detokenize""" ,self.detokenize ,response_model=_UpperCamelCase ,response_class=_UpperCamelCase ,methods=["""POST"""] ,),
APIRoute(
"""/forward""" ,self.forward ,response_model=_UpperCamelCase ,response_class=_UpperCamelCase ,methods=["""POST"""] ,),
] ,timeout=6_0_0 ,)
def a__ ( self :str ):
run(self._app ,host=self.host ,port=self.port ,workers=self.workers )
def a__ ( self :Optional[Any] ):
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def a__ ( self :Dict ,_UpperCamelCase :Optional[Any] = Body(_UpperCamelCase ,embed=_UpperCamelCase ) ,_UpperCamelCase :int = Body(_UpperCamelCase ,embed=_UpperCamelCase ) ):
try:
snake_case_ : Dict = self._pipeline.tokenizer.tokenize(_UpperCamelCase )
if return_ids:
snake_case_ : Optional[Any] = self._pipeline.tokenizer.convert_tokens_to_ids(_UpperCamelCase )
return ServeTokenizeResult(tokens=_UpperCamelCase ,tokens_ids=_UpperCamelCase )
else:
return ServeTokenizeResult(tokens=_UpperCamelCase )
except Exception as e:
raise HTTPException(status_code=5_0_0 ,detail={"""model""": """""", """error""": str(_UpperCamelCase )} )
def a__ ( self :List[Any] ,_UpperCamelCase :Optional[int] = Body(_UpperCamelCase ,embed=_UpperCamelCase ) ,_UpperCamelCase :Union[str, Any] = Body(_UpperCamelCase ,embed=_UpperCamelCase ) ,_UpperCamelCase :List[str] = Body(_UpperCamelCase ,embed=_UpperCamelCase ) ,):
try:
snake_case_ : Tuple = self._pipeline.tokenizer.decode(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
return ServeDeTokenizeResult(model="""""" ,text=_UpperCamelCase )
except Exception as e:
raise HTTPException(status_code=5_0_0 ,detail={"""model""": """""", """error""": str(_UpperCamelCase )} )
async def a__ ( self :Optional[Any] ,_UpperCamelCase :Dict=Body(_UpperCamelCase ,embed=_UpperCamelCase ) ):
# Check we don't have empty string
if len(_UpperCamelCase ) == 0:
return ServeForwardResult(output=[] ,attention=[] )
try:
# Forward through the model
snake_case_ : List[Any] = self._pipeline(_UpperCamelCase )
return ServeForwardResult(output=_UpperCamelCase )
except Exception as e:
raise HTTPException(5_0_0 ,{"""error""": str(_UpperCamelCase )} )
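# Usage sketch (hedged): the command defined above is exposed through the
# `transformers-cli serve` entry point, e.g.
#   transformers-cli serve --task sentiment-analysis --port 8888 --workers 1
# The accepted task names come from get_supported_tasks(), so treat the task above as illustrative.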
| 701
|
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class __UpperCamelCase :
def __init__( self :Union[str, Any] ,_UpperCamelCase :int ,_UpperCamelCase :str=1_3 ,_UpperCamelCase :Tuple=7 ,_UpperCamelCase :Tuple=False ,_UpperCamelCase :Tuple=True ,_UpperCamelCase :Union[str, Any]=False ,_UpperCamelCase :int=True ,_UpperCamelCase :List[str]=3_3 ,_UpperCamelCase :Any=3_2 ,_UpperCamelCase :Any=5 ,_UpperCamelCase :List[str]=4 ,_UpperCamelCase :Tuple=3_7 ,_UpperCamelCase :Optional[Any]="gelu" ,_UpperCamelCase :Any=0.1 ,_UpperCamelCase :List[Any]=0.1 ,_UpperCamelCase :Any=5_1_2 ,_UpperCamelCase :Tuple=1_6 ,_UpperCamelCase :Any=2 ,_UpperCamelCase :Optional[Any]=0.02 ,_UpperCamelCase :List[str]=3 ,_UpperCamelCase :Union[str, Any]=4 ,_UpperCamelCase :Dict=None ,):
snake_case_ : Tuple = parent
snake_case_ : List[str] = batch_size
snake_case_ : List[str] = seq_length
snake_case_ : Any = is_training
snake_case_ : List[Any] = use_input_mask
snake_case_ : int = use_token_type_ids
snake_case_ : Optional[int] = use_labels
snake_case_ : List[str] = vocab_size
snake_case_ : Dict = hidden_size
snake_case_ : Optional[int] = num_hidden_layers
snake_case_ : Optional[Any] = num_attention_heads
snake_case_ : int = intermediate_size
snake_case_ : Optional[Any] = hidden_act
snake_case_ : int = hidden_dropout_prob
snake_case_ : Optional[int] = attention_probs_dropout_prob
snake_case_ : Optional[int] = max_position_embeddings
snake_case_ : List[str] = type_vocab_size
snake_case_ : int = type_sequence_label_size
snake_case_ : Dict = initializer_range
snake_case_ : Any = num_labels
snake_case_ : Any = num_choices
snake_case_ : Tuple = scope
def a__ ( self :str ):
snake_case_ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
snake_case_ : str = None
if self.use_input_mask:
snake_case_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : Dict = None
snake_case_ : List[str] = None
snake_case_ : Tuple = None
if self.use_labels:
snake_case_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
snake_case_ : Union[str, Any] = ids_tensor([self.batch_size] ,self.num_choices )
snake_case_ : Dict = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self :Optional[Any] ):
return EsmConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
def a__ ( self :str ,_UpperCamelCase :List[Any] ,_UpperCamelCase :List[str] ,_UpperCamelCase :Any ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :List[Any] ,_UpperCamelCase :Optional[int] ):
snake_case_ : Union[str, Any] = EsmModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ : int = model(_UpperCamelCase ,attention_mask=_UpperCamelCase )
snake_case_ : str = model(_UpperCamelCase )
snake_case_ : Optional[int] = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def a__ ( self :Any ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Any ,_UpperCamelCase :Dict ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :Optional[int] ):
snake_case_ : str = EsmForMaskedLM(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ : Dict = model(_UpperCamelCase ,attention_mask=_UpperCamelCase ,labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self :Optional[int] ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :List[str] ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :Any ):
snake_case_ : List[Any] = self.num_labels
snake_case_ : int = EsmForTokenClassification(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ : int = model(_UpperCamelCase ,attention_mask=_UpperCamelCase ,labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self :Union[str, Any] ):
snake_case_ : Dict = self.prepare_config_and_inputs()
(
(
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) ,
) : Optional[int] = config_and_inputs
snake_case_ : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
lowercase : Optional[int] = False
lowercase : List[str] = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase : int = ()
lowercase : List[str] = (
{
'feature-extraction': EsmModel,
'fill-mask': EsmForMaskedLM,
'text-classification': EsmForSequenceClassification,
'token-classification': EsmForTokenClassification,
'zero-shot': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase : str = True
def a__ ( self :Any ):
snake_case_ : Any = EsmModelTester(self )
snake_case_ : str = ConfigTester(self ,config_class=_UpperCamelCase ,hidden_size=3_7 )
def a__ ( self :Optional[Any] ):
self.config_tester.run_common_tests()
def a__ ( self :Optional[Any] ):
snake_case_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def a__ ( self :Dict ):
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ : int = type
self.model_tester.create_and_check_model(*_UpperCamelCase )
def a__ ( self :Dict ):
snake_case_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCamelCase )
def a__ ( self :List[str] ):
snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase )
@slow
def a__ ( self :Union[str, Any] ):
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : int = EsmModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def a__ ( self :Tuple ):
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()[0]
snake_case_ : Optional[int] = EsmEmbeddings(config=_UpperCamelCase )
snake_case_ : List[Any] = torch.as_tensor([[1_2, 3_1, 1_3, model.padding_idx]] )
snake_case_ : Union[str, Any] = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
snake_case_ : Optional[Any] = create_position_ids_from_input_ids(_UpperCamelCase ,model.padding_idx )
self.assertEqual(position_ids.shape ,expected_positions.shape )
self.assertTrue(torch.all(torch.eq(_UpperCamelCase ,_UpperCamelCase ) ) )
def a__ ( self :List[Any] ):
snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
snake_case_ : List[Any] = EsmEmbeddings(config=_UpperCamelCase )
snake_case_ : Dict = torch.empty(2 ,4 ,3_0 )
snake_case_ : List[Any] = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
snake_case_ : Any = torch.as_tensor([expected_single_positions, expected_single_positions] )
snake_case_ : str = embeddings.create_position_ids_from_inputs_embeds(_UpperCamelCase )
self.assertEqual(position_ids.shape ,expected_positions.shape )
self.assertTrue(torch.all(torch.eq(_UpperCamelCase ,_UpperCamelCase ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def a__ ( self :Optional[Any] ):
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def a__ ( self :Optional[int] ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def a__ ( self :Optional[int] ):
pass
@require_torch
class __UpperCamelCase ( lowercase__ ):
@slow
def a__ ( self :Any ):
with torch.no_grad():
snake_case_ : Optional[Any] = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
snake_case_ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
snake_case_ : Dict = model(_UpperCamelCase )[0]
snake_case_ : Optional[int] = 3_3
snake_case_ : List[Any] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape ,_UpperCamelCase )
snake_case_ : Union[str, Any] = torch.tensor(
[[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,_UpperCamelCase ,atol=1E-4 ) )
@slow
def a__ ( self :List[Any] ):
with torch.no_grad():
snake_case_ : List[Any] = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
snake_case_ : str = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
snake_case_ : Optional[Any] = model(_UpperCamelCase )[0]
# compare the actual values for a slice.
snake_case_ : List[str] = torch.tensor(
[[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,_UpperCamelCase ,atol=1E-4 ) )
| 267
| 0
|
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
__A = logging.get_logger(__name__)
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Any:
"""simple docstring"""
lowerCAmelCase__ :List[Any] = set()
lowerCAmelCase__ :str = []
def parse_line(_SCREAMING_SNAKE_CASE ):
for line in fp:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowerCAmelCase__ :Any = line.decode('UTF-8' )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(' ' ):
# process a single warning and move it to `selected_warnings`.
if len(__lowerCAmelCase ) > 0:
lowerCAmelCase__ :List[Any] = '\n'.join(__lowerCAmelCase )
# Only keep the warnings specified in `targets`
if any(F": {x}: " in warning for x in targets ):
selected_warnings.add(__lowerCAmelCase )
buffer.clear()
continue
else:
lowerCAmelCase__ :List[str] = line.strip()
buffer.append(__lowerCAmelCase )
if from_gh:
for filename in os.listdir(__lowerCAmelCase ):
lowerCAmelCase__ :Tuple = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
if not os.path.isdir(__lowerCAmelCase ):
# read the file
if filename != "warnings.txt":
continue
with open(__lowerCAmelCase ) as fp:
parse_line(__lowerCAmelCase )
else:
try:
with zipfile.ZipFile(__lowerCAmelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(__lowerCAmelCase ):
# read the file
if filename != "warnings.txt":
continue
with z.open(__lowerCAmelCase ) as fp:
parse_line(__lowerCAmelCase )
except Exception:
logger.warning(
F"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped." )
return selected_warnings
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[Any]:
"""simple docstring"""
lowerCAmelCase__ :List[Any] = set()
lowerCAmelCase__ :Optional[int] = [os.path.join(__lowerCAmelCase , __lowerCAmelCase ) for p in os.listdir(__lowerCAmelCase ) if (p.endswith('.zip' ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(__lowerCAmelCase , __lowerCAmelCase ) )
return selected_warnings
if __name__ == "__main__":
def __A (_SCREAMING_SNAKE_CASE ) ->Optional[Any]:
"""simple docstring"""
return values.split(',' )
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
__A = parser.parse_args()
__A = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
__A = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
__A = extract_warnings(args.output_dir, args.targets)
__A = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
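    # Example invocation (hedged sketch -- the script name and values are placeholders):
    #   python extract_warnings.py --workflow_run_id 1234567890 \
    #       --output_dir ./warnings_output --token $GITHUB_TOKEN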
| 93
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool( PipelineTool ):
"""simple docstring"""
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["audio"]
    outputs = ["text"]
    def encode( self : List[Any] , lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
return self.pre_processor(lowerCamelCase_ , return_tensors='pt' ).input_features
    def forward( self : Optional[Any] , lowerCamelCase_ : Any ):
'''simple docstring'''
return self.model.generate(inputs=lowerCamelCase_ )
    def decode( self : List[str] , lowerCamelCase_ : Tuple ):
'''simple docstring'''
        return self.pre_processor.batch_decode(lowerCamelCase_ , skip_special_tokens=True )[0]
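# Usage sketch (hedged): PipelineTool subclasses are callable, so something like
#   tool = SpeechToTextTool()
#   transcription = tool(audio)
# should run encode -> forward -> decode on a raw audio array; the exact input
# type accepted depends on WhisperProcessor, so treat this as illustrative.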
| 304
| 0
|
from typing import Dict, Optional
import numpy as np
import datasets
UpperCamelCase__ : List[Any] = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
UpperCamelCase__ : Tuple = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
UpperCamelCase__ : List[Any] = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def intersect_and_union(
    pred_label,
    label,
    num_labels: int,
    ignore_index: int,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Calculate the intersection and union areas between a predicted and a ground truth segmentation map."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    # drop pixels whose ground truth equals `ignore_index`
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels: int,
    ignore_index: int,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Sum the intersection and union areas over a list of (prediction, ground truth) pairs."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(
    results,
    gt_seg_maps,
    num_labels: int,
    ignore_index: int,
    nan_to_num: Optional[int] = None,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Compute mean IoU, mean accuracy and overall accuracy over all images."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16'))),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16'))),
}) , reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] , )
    def _compute(self, predictions, references, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
        iou_result = mean_iou(
            results=predictions, gt_seg_maps=references, num_labels=num_labels, ignore_index=ignore_index,
            nan_to_num=nan_to_num, label_map=label_map, reduce_labels=reduce_labels,
        )
        return iou_result
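

# Hedged illustration (not part of the original metric file): the same histogram-based
# intersection/union counting used in `intersect_and_union` above, worked on a tiny
# two-class example so the per-category IoU can be checked by hand. The arrays are made up.
import numpy as np

_pred = np.array([[0, 1], [1, 1]])
_gt = np.array([[0, 1], [0, 1]])
_num_classes = 2
_intersect = _pred[_pred == _gt]  # pixels where prediction and ground truth agree
_area_intersect = np.histogram(_intersect, bins=_num_classes, range=(0, _num_classes - 1))[0]
_area_pred = np.histogram(_pred, bins=_num_classes, range=(0, _num_classes - 1))[0]
_area_gt = np.histogram(_gt, bins=_num_classes, range=(0, _num_classes - 1))[0]
_area_union = _area_pred + _area_gt - _area_intersect
_per_category_iou = _area_intersect / _area_union  # -> [0.5, 0.6666...]
_mean_iou_value = np.nanmean(_per_category_iou)    # -> 0.5833...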
| 711
|
def solution(pence: int = 200) -> int:
    """Count the number of ways `pence` can be made from standard UK coin denominations."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73_682
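

# Hedged illustration (not part of the original solution): the same bottom-up recurrence on a
# tiny target so the table can be verified by hand. With coins [1, 2] and a target of 4 pence
# the table evolves to [1, 1, 2, 2, 3]; the three ways are 1+1+1+1, 1+1+2 and 2+2.
def _count_ways(coins, pence):
    ways = [0] * (pence + 1)
    ways[0] = 1
    for coin in coins:
        for i in range(coin, pence + 1):
            ways[i] += ways[i - coin]
    return ways[pence]


assert _count_ways([1, 2], 4) == 3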
| 620
| 0
|
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase :
'''simple docstring'''
def __init__( self : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any=13 , lowerCAmelCase_ : Tuple=30 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Optional[Any]=32 , lowerCAmelCase_ : str=5 , lowerCAmelCase_ : Tuple=4 , lowerCAmelCase_ : List[Any]=37 , lowerCAmelCase_ : Dict="gelu" , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : Dict=10 , lowerCAmelCase_ : Any=0.02 , lowerCAmelCase_ : Union[str, Any]=3 , lowerCAmelCase_ : Any=0.6 , lowerCAmelCase_ : int=None , ) -> List[str]:
'''simple docstring'''
A__ : Optional[int] =parent
A__ : List[Any] =batch_size
A__ : Tuple =image_size
A__ : Optional[Any] =patch_size
A__ : Tuple =num_channels
A__ : Optional[Any] =is_training
A__ : Optional[int] =use_labels
A__ : Dict =hidden_size
A__ : Optional[int] =num_hidden_layers
A__ : str =num_attention_heads
A__ : List[str] =intermediate_size
A__ : Optional[Any] =hidden_act
A__ : str =hidden_dropout_prob
A__ : str =attention_probs_dropout_prob
A__ : Union[str, Any] =type_sequence_label_size
A__ : Dict =initializer_range
A__ : List[str] =mask_ratio
A__ : Optional[int] =scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
A__ : Dict =(image_size // patch_size) ** 2
A__ : Union[str, Any] =int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
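        # Worked check of the formula above with this tester's defaults
        # (image_size=30, patch_size=2, mask_ratio=0.6):
        # num_patches = (30 // 2) ** 2 = 225 and
        # seq_length = ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91.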
def lowercase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
A__ : List[Any] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ : Union[str, Any] =None
if self.use_labels:
A__ : Union[str, Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : str =self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : Dict ) -> str:
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] ) -> List[Any]:
'''simple docstring'''
A__ : List[str] =ViTMAEModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A__ : Union[str, Any] =model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple ) -> List[str]:
'''simple docstring'''
A__ : str =ViTMAEForPreTraining(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A__ : int =model(__lowerCamelCase )
A__ : Dict =(self.image_size // self.patch_size) ** 2
A__ : Tuple =self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
A__ : Dict =1
A__ : List[Any] =ViTMAEForPreTraining(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A__ : int =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A__ : Optional[Any] =model(__lowerCamelCase )
A__ : Optional[Any] =self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowercase__ ( self : List[str] ) -> int:
'''simple docstring'''
A__ : List[str] =self.prepare_config_and_inputs()
A__ , A__ , A__ : Union[str, Any] =config_and_inputs
A__ : int ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__snake_case = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
__snake_case = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {}
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = False
def lowercase__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] =ViTMAEModelTester(self )
A__ : Tuple =ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def lowercase__ ( self : Dict ) -> Dict:
'''simple docstring'''
pass
def lowercase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
A__ , A__ : int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Any =model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ : Dict =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def lowercase__ ( self : List[Any] ) -> str:
'''simple docstring'''
A__ , A__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Dict =model_class(__lowerCamelCase )
A__ : Tuple =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : Optional[int] =[*signature.parameters.keys()]
A__ : Union[str, Any] =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def lowercase__ ( self : int ) -> List[str]:
'''simple docstring'''
A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
A__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCamelCase )
def lowercase__ ( self : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] ) -> int:
'''simple docstring'''
np.random.seed(2 )
A__ : int =int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
A__ : Any =np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
A__ : Dict =torch.from_numpy(__lowerCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
A__ : Optional[int] =pt_noise
super().check_pt_tf_models(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def lowercase__ ( self : List[Any] ) -> str:
'''simple docstring'''
A__ , A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Tuple =model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
A__ : List[Any] =model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
A__ : int =outputs[0].cpu().numpy()
A__ : Union[str, Any] =0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCamelCase )
A__ : Tuple =model_class.from_pretrained(__lowerCamelCase )
model.to(__lowerCamelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
A__ : int =model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
# Make sure we don't have nans
A__ : List[str] =after_outputs[0].cpu().numpy()
A__ : List[str] =0
A__ : Dict =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__lowerCamelCase , 1e-5 )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowercase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowercase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowercase__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def lowercase__ ( self : Tuple ) -> int:
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowercase__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
pass
@slow
def lowercase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : List[str] =ViTMAEModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ : List[str] =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def lowercase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
np.random.seed(2 )
A__ : List[str] =ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(__lowerCamelCase )
A__ : List[str] =self.default_image_processor
A__ : List[Any] =prepare_img()
A__ : str =image_processor(images=__lowerCamelCase , return_tensors="""pt""" ).to(__lowerCamelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
A__ : List[str] =ViTMAEConfig()
A__ : Tuple =int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
A__ : Tuple =np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
A__ : Optional[int] =model(**__lowerCamelCase , noise=torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase ) )
# verify the logits
A__ : Optional[Any] =torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
A__ : int =torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__lowerCamelCase ) , atol=1e-4 ) )
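

# Hedged sketch (not one of the tests above): the `noise` argument passed in the integration
# test also makes ViTMAE's random patch masking reproducible outside the test suite. The
# randomly initialised model and random inputs below are illustrative only.
import numpy as np
import torch
from transformers import ViTMAEConfig, ViTMAEForPreTraining

_config = ViTMAEConfig()
_model = ViTMAEForPreTraining(_config).eval()
_num_patches = (_config.image_size // _config.patch_size) ** 2
_noise = torch.from_numpy(np.random.uniform(size=(1, _num_patches)))
_pixel_values = torch.rand(1, _config.num_channels, _config.image_size, _config.image_size)
with torch.no_grad():
    _first = _model(pixel_values=_pixel_values, noise=_noise)
    _second = _model(pixel_values=_pixel_values, noise=_noise)
# the same noise produces the same mask, so the two passes agree
assert torch.allclose(_first.logits, _second.logits)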
| 215
|
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
@slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 103
| 0
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCamelCase : Any = logging.get_logger(__name__)
_lowerCamelCase : str = {
"""SenseTime/deformable-detr""": """https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json""",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class lowerCamelCase__ ( __snake_case ):
__UpperCAmelCase = """deformable_detr"""
__UpperCAmelCase = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=3 , lowerCAmelCase__=300 , lowerCAmelCase__=1_024 , lowerCAmelCase__=6 , lowerCAmelCase__=1_024 , lowerCAmelCase__=8 , lowerCAmelCase__=6 , lowerCAmelCase__=1_024 , lowerCAmelCase__=8 , lowerCAmelCase__=0.0 , lowerCAmelCase__=True , lowerCAmelCase__="relu" , lowerCAmelCase__=256 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1.0 , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__="sine" , lowerCAmelCase__="resnet50" , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=4 , lowerCAmelCase__=4 , lowerCAmelCase__=4 , lowerCAmelCase__=False , lowerCAmelCase__=300 , lowerCAmelCase__=False , lowerCAmelCase__=1 , lowerCAmelCase__=5 , lowerCAmelCase__=2 , lowerCAmelCase__=1 , lowerCAmelCase__=1 , lowerCAmelCase__=5 , lowerCAmelCase__=2 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.25 , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> Optional[Any]:
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
_UpperCamelCase :List[str] =CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase :int =backbone_config.get("""model_type""" )
_UpperCamelCase :Dict =CONFIG_MAPPING[backbone_model_type]
_UpperCamelCase :Tuple =config_class.from_dict(lowerCAmelCase__ )
_UpperCamelCase :Tuple =use_timm_backbone
_UpperCamelCase :List[str] =backbone_config
_UpperCamelCase :Optional[int] =num_channels
_UpperCamelCase :int =num_queries
_UpperCamelCase :int =max_position_embeddings
_UpperCamelCase :Optional[Any] =d_model
_UpperCamelCase :Tuple =encoder_ffn_dim
_UpperCamelCase :Dict =encoder_layers
_UpperCamelCase :Union[str, Any] =encoder_attention_heads
_UpperCamelCase :str =decoder_ffn_dim
_UpperCamelCase :str =decoder_layers
_UpperCamelCase :str =decoder_attention_heads
_UpperCamelCase :Dict =dropout
_UpperCamelCase :Optional[int] =attention_dropout
_UpperCamelCase :Tuple =activation_dropout
_UpperCamelCase :Dict =activation_function
_UpperCamelCase :Any =init_std
_UpperCamelCase :Optional[Any] =init_xavier_std
_UpperCamelCase :Dict =encoder_layerdrop
_UpperCamelCase :int =auxiliary_loss
_UpperCamelCase :Union[str, Any] =position_embedding_type
_UpperCamelCase :List[Any] =backbone
_UpperCamelCase :Optional[int] =use_pretrained_backbone
_UpperCamelCase :List[Any] =dilation
# deformable attributes
_UpperCamelCase :Any =num_feature_levels
_UpperCamelCase :Union[str, Any] =encoder_n_points
_UpperCamelCase :Any =decoder_n_points
_UpperCamelCase :Optional[Any] =two_stage
_UpperCamelCase :List[Any] =two_stage_num_proposals
_UpperCamelCase :Optional[int] =with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
_UpperCamelCase :Union[str, Any] =class_cost
_UpperCamelCase :int =bbox_cost
_UpperCamelCase :List[Any] =giou_cost
# Loss coefficients
_UpperCamelCase :Optional[Any] =mask_loss_coefficient
_UpperCamelCase :Union[str, Any] =dice_loss_coefficient
_UpperCamelCase :List[Any] =bbox_loss_coefficient
_UpperCamelCase :int =giou_loss_coefficient
_UpperCamelCase :List[str] =eos_coefficient
_UpperCamelCase :Dict =focal_alpha
_UpperCamelCase :Union[str, Any] =disable_custom_kernels
super().__init__(is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self.d_model
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
_UpperCamelCase :int =copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
_UpperCamelCase :Union[str, Any] =self.backbone_config.to_dict()
_UpperCamelCase :Dict =self.__class__.model_type
return output
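

# Hedged usage sketch (not part of the original file): the attribute_map above lets generic
# code read `hidden_size` even though this config stores the value as `d_model`, and the
# properties expose `num_attention_heads` the same way.
from transformers import DeformableDetrConfig

_config = DeformableDetrConfig(d_model=256, encoder_attention_heads=8)
assert _config.hidden_size == 256        # resolved through attribute_map -> d_model
assert _config.num_attention_heads == 8  # property returning encoder_attention_heads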
| 512
|
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the relative number of steps after which the point (x, y) diverges."""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black for points inside the set (distance == 1), white otherwise."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Black for points inside the set, an HSV-based colour otherwise."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """Render the Mandelbrot set as a PIL image."""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
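

# Hedged illustration (not in the original script): points inside the Mandelbrot set never
# diverge, so get_distance() returns 1.0 and they are coloured black, while points far
# outside diverge on the first step and get a relative distance of 0.
assert get_distance(0, 0, 50) == 1.0  # the origin belongs to the set
assert get_distance(2, 2, 50) == 0.0  # clearly outside, diverges immediately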
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 512
| 1
|
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _UpperCAmelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] ) -> List[str]:
assert isinstance(__lowerCamelCase , __lowerCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] ) -> str:
_snake_case = tmp_path / '''cache'''
_snake_case = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case = ParquetDatasetReader(__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase ).read()
_check_parquet_dataset(__lowerCamelCase , __lowerCamelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def _UpperCAmelCase ( __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Tuple:
_snake_case = tmp_path / '''cache'''
_snake_case = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_snake_case = features.copy() if features else default_expected_features
_snake_case = (
Features({feature: Value(__lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case = ParquetDatasetReader(__lowerCamelCase , features=__lowerCamelCase , cache_dir=__lowerCamelCase ).read()
_check_parquet_dataset(__lowerCamelCase , __lowerCamelCase )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def _UpperCAmelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Any ) -> Union[str, Any]:
_snake_case = tmp_path / '''cache'''
_snake_case = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_snake_case = ParquetDatasetReader(__lowerCamelCase , cache_dir=__lowerCamelCase , split=__lowerCamelCase ).read()
_check_parquet_dataset(__lowerCamelCase , __lowerCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def _UpperCAmelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : List[str] ) -> str:
if issubclass(__lowerCamelCase , __lowerCamelCase ):
_snake_case = parquet_path
elif issubclass(__lowerCamelCase , __lowerCamelCase ):
_snake_case = [parquet_path]
_snake_case = tmp_path / '''cache'''
_snake_case = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_snake_case = ParquetDatasetReader(__lowerCamelCase , cache_dir=__lowerCamelCase ).read()
_check_parquet_dataset(__lowerCamelCase , __lowerCamelCase )
def _UpperCAmelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : Dict=("train",) ) -> Optional[int]:
assert isinstance(__lowerCamelCase , __lowerCamelCase )
for split in splits:
_snake_case = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def _UpperCAmelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict ) -> int:
_snake_case = tmp_path / '''cache'''
_snake_case = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case = ParquetDatasetReader(
{'''train''': parquet_path} , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase ).read()
_check_parquet_datasetdict(__lowerCamelCase , __lowerCamelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def _UpperCAmelCase ( __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Tuple ) -> Any:
_snake_case = tmp_path / '''cache'''
_snake_case = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_snake_case = features.copy() if features else default_expected_features
_snake_case = (
Features({feature: Value(__lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case = ParquetDatasetReader({'''train''': parquet_path} , features=__lowerCamelCase , cache_dir=__lowerCamelCase ).read()
_check_parquet_datasetdict(__lowerCamelCase , __lowerCamelCase )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def _UpperCAmelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : str ) -> Optional[int]:
if split:
_snake_case = {split: parquet_path}
else:
_snake_case = '''train'''
_snake_case = {'''train''': parquet_path, '''test''': parquet_path}
_snake_case = tmp_path / '''cache'''
_snake_case = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_snake_case = ParquetDatasetReader(__lowerCamelCase , cache_dir=__lowerCamelCase ).read()
_check_parquet_datasetdict(__lowerCamelCase , __lowerCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def _UpperCAmelCase ( __lowerCamelCase : Dict , __lowerCamelCase : str ) -> Tuple:
_snake_case = ParquetDatasetWriter(__lowerCamelCase , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
_snake_case = pq.ParquetFile(tmp_path / '''foo.parquet''' )
_snake_case = pf.read()
assert dataset.data.table == output_table
def _UpperCAmelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : Any ) -> Dict:
_snake_case = str(shared_datadir / '''test_image_rgb.jpg''' )
_snake_case = {'''image''': [image_path]}
_snake_case = Features({'''image''': Image()} )
_snake_case = Dataset.from_dict(__lowerCamelCase , features=__lowerCamelCase )
_snake_case = ParquetDatasetWriter(__lowerCamelCase , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
_snake_case = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
_snake_case = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=__lowerCamelCase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''' , [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def _UpperCAmelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] ) -> Any:
assert get_writer_batch_size(__lowerCamelCase ) == expected
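

# Hedged usage sketch (not one of the tests above): a minimal Parquet round-trip using the
# same writer/reader pair the tests exercise. The file name "tiny.parquet" is illustrative.
from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

_tiny = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
assert ParquetDatasetWriter(_tiny, "tiny.parquet").write() > 0
_reloaded = ParquetDatasetReader("tiny.parquet").read()
assert _reloaded.column_names == ["col_1", "col_2"]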
| 224
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def _UpperCAmelCase ( __lowerCamelCase : Optional[int] ) -> str:
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def _UpperCAmelCase ( __lowerCamelCase : Tuple ) -> Dict:
_snake_case = create_tensor(__lowerCamelCase )
_snake_case = gather(__lowerCamelCase )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def _UpperCAmelCase ( __lowerCamelCase : Tuple ) -> Tuple:
_snake_case = [state.process_index]
_snake_case = gather_object(__lowerCamelCase )
assert len(__lowerCamelCase ) == state.num_processes, f'''{gathered_obj}, {len(__lowerCamelCase )} != {state.num_processes}'''
assert gathered_obj == list(range(state.num_processes ) ), f'''{gathered_obj} != {list(range(state.num_processes ) )}'''
def _UpperCAmelCase ( __lowerCamelCase : int ) -> Union[str, Any]:
_snake_case = create_tensor(__lowerCamelCase )
_snake_case = broadcast(__lowerCamelCase )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def _UpperCAmelCase ( __lowerCamelCase : str ) -> int:
# We need to pad the tensor with one more element if we are the main process
# to ensure that we can pad
if state.is_main_process:
_snake_case = torch.arange(state.num_processes + 1 ).to(state.device )
else:
_snake_case = torch.arange(state.num_processes ).to(state.device )
_snake_case = pad_across_processes(__lowerCamelCase )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def _UpperCAmelCase ( __lowerCamelCase : Any ) -> List[str]:
# For now runs on only two processes
if state.num_processes != 2:
return
_snake_case = create_tensor(__lowerCamelCase )
_snake_case = reduce(__lowerCamelCase , '''sum''' )
_snake_case = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(__lowerCamelCase , __lowerCamelCase ), f'''{reduced_tensor} != {truth_tensor}'''
def _UpperCAmelCase ( __lowerCamelCase : int ) -> Optional[int]:
# For now runs on only two processes
if state.num_processes != 2:
return
_snake_case = create_tensor(__lowerCamelCase )
_snake_case = reduce(__lowerCamelCase , '''mean''' )
_snake_case = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(__lowerCamelCase , __lowerCamelCase ), f'''{reduced_tensor} != {truth_tensor}'''
def _UpperCAmelCase ( __lowerCamelCase : Dict ) -> List[Any]:
# For xla_spawn (TPUs)
main()
def _UpperCAmelCase ( ) -> Optional[Any]:
_snake_case = PartialState()
state.print(f'''State: {state}''' )
state.print('''testing gather''' )
test_gather(__lowerCamelCase )
state.print('''testing gather_object''' )
test_gather_object(__lowerCamelCase )
state.print('''testing broadcast''' )
test_broadcast(__lowerCamelCase )
state.print('''testing pad_across_processes''' )
test_pad_across_processes(__lowerCamelCase )
state.print('''testing reduce_sum''' )
test_reduce_sum(__lowerCamelCase )
state.print('''testing reduce_mean''' )
test_reduce_mean(__lowerCamelCase )
if __name__ == "__main__":
main()
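
# Hedged note (not part of the original script): with two processes, create_tensor() gives
# process 0 the tensor [1., 2.] and process 1 the tensor [3., 4.]; gather() then returns
# [1., 2., 3., 4.] on every process, which is exactly what test_gather asserts above. The
# script is normally run under the launcher, e.g. `accelerate launch --num_processes 2 <this_file>.py`
# (the file name is a placeholder).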
| 224
| 1
|
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def lowercase__ ( _UpperCamelCase) -> List[Any]:
"""simple docstring"""
UpperCamelCase = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'_float_tensor',
'decoder.output_projection.weight',
]
for k in ignore_keys:
state_dict.pop(_UpperCamelCase , _UpperCamelCase)
def lowercase__ ( _UpperCamelCase) -> Optional[int]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = emb.weight.shape
UpperCamelCase = nn.Linear(_UpperCamelCase , _UpperCamelCase , bias=_UpperCamelCase)
UpperCamelCase = emb.weight.data
return lin_layer
def lowercase__ ( _UpperCamelCase , _UpperCamelCase="facebook/mbart-large-en-ro" , _UpperCamelCase=False , _UpperCamelCase=False) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = torch.load(_UpperCamelCase , map_location='cpu')['model']
remove_ignore_keys_(_UpperCamelCase)
UpperCamelCase = state_dict['encoder.embed_tokens.weight'].shape[0]
UpperCamelCase = MBartConfig.from_pretrained(_UpperCamelCase , vocab_size=_UpperCamelCase)
if mbart_aa and finetuned:
UpperCamelCase = 'relu'
UpperCamelCase = state_dict['decoder.embed_tokens.weight']
UpperCamelCase = MBartForConditionalGeneration(_UpperCamelCase)
model.model.load_state_dict(_UpperCamelCase)
if finetuned:
UpperCamelCase = make_linear_from_emb(model.model.shared)
return model
if __name__ == "__main__":
__magic_name__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
__magic_name__ : Optional[Any] = parser.parse_args()
__magic_name__ : Optional[int] = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
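
# Hedged usage note (not part of the original script; the script file name is a placeholder):
#   python convert_mbart_checkpoint.py /path/to/model.pt ./mbart-converted \
#       --hf_config facebook/mbart-large-cc25 --finetuned
# converts a fairseq mBART checkpoint and writes the Hugging Face model to ./mbart-converted.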
| 714
|
from manim import *
class A__ ( __snake_case ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
UpperCamelCase = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase = Rectangle(height=0.2_5 , width=0.2_5 )
UpperCamelCase = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
UpperCamelCase = [mem.copy() for i in range(6 )]
UpperCamelCase = [mem.copy() for i in range(6 )]
UpperCamelCase = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase = VGroup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase = Text('CPU' , font_size=24 )
UpperCamelCase = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_SCREAMING_SNAKE_CASE )
UpperCamelCase = [mem.copy() for i in range(4 )]
UpperCamelCase = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase = Text('GPU' , font_size=24 )
UpperCamelCase = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
gpu.move_to([-1, -1, 0] )
self.add(_SCREAMING_SNAKE_CASE )
UpperCamelCase = [mem.copy() for i in range(6 )]
UpperCamelCase = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase = Text('Model' , font_size=24 )
UpperCamelCase = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
model.move_to([3, -1.0, 0] )
self.add(_SCREAMING_SNAKE_CASE )
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = []
for i, rect in enumerate(_SCREAMING_SNAKE_CASE ):
rect.set_stroke(_SCREAMING_SNAKE_CASE )
UpperCamelCase = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(_SCREAMING_SNAKE_CASE , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=_SCREAMING_SNAKE_CASE )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_SCREAMING_SNAKE_CASE , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_SCREAMING_SNAKE_CASE , buff=0.0 )
self.add(_SCREAMING_SNAKE_CASE )
model_cpu_arr.append(_SCREAMING_SNAKE_CASE )
self.add(*_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )
UpperCamelCase = [mem.copy() for i in range(6 )]
UpperCamelCase = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase = Text('Loaded Checkpoint' , font_size=24 )
UpperCamelCase = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
checkpoint.move_to([3, 0.5, 0] )
self.add(_SCREAMING_SNAKE_CASE )
UpperCamelCase = []
UpperCamelCase = []
for i, rect in enumerate(_SCREAMING_SNAKE_CASE ):
UpperCamelCase = fill.copy().set_fill(_SCREAMING_SNAKE_CASE , opacity=0.7 )
target.move_to(_SCREAMING_SNAKE_CASE )
ckpt_arr.append(_SCREAMING_SNAKE_CASE )
UpperCamelCase = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_SCREAMING_SNAKE_CASE )
self.add(*_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )
UpperCamelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(_SCREAMING_SNAKE_CASE , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_SCREAMING_SNAKE_CASE )
UpperCamelCase = MarkupText(
f'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
UpperCamelCase = [meta_mem.copy() for i in range(6 )]
UpperCamelCase = [meta_mem.copy() for i in range(6 )]
UpperCamelCase = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase = VGroup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase = Text('Disk' , font_size=24 )
UpperCamelCase = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
disk.move_to([-4.0, -1.2_5, 0] )
self.play(Write(_SCREAMING_SNAKE_CASE , run_time=3 ) , Write(_SCREAMING_SNAKE_CASE , run_time=1 ) , Create(_SCREAMING_SNAKE_CASE , run_time=1 ) )
UpperCamelCase = []
for i, rect in enumerate(_SCREAMING_SNAKE_CASE ):
UpperCamelCase = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_SCREAMING_SNAKE_CASE , run_time=1.5 ) )
self.play(*_SCREAMING_SNAKE_CASE )
self.play(FadeOut(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase = MarkupText(f'Then, the checkpoint is removed from memory\nthrough garbage collection.' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_SCREAMING_SNAKE_CASE , run_time=3 ) )
self.play(
FadeOut(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE ) , )
self.wait()
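
# Hedged note (not part of the original scene): Manim scenes such as the one above are usually
# rendered from the command line, e.g. `manim -pql <this_file>.py <SceneClassName>` for a quick
# low-quality preview; the file and class names here are placeholders.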
| 410
| 0
|