from __future__ import annotations

import unittest

from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow


if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST

if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )


@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
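
# The round trip the tests above exercise, outside the test harness: a minimal
# sketch, assuming both PyTorch and TensorFlow are installed ("bert-base-uncased"
# is the real Hub checkpoint used above; the local directory name is hypothetical).
#
#     from transformers import BertModel, TFBertModel
#
#     tf_model = TFBertModel.from_pretrained("bert-base-uncased", from_pt=True)
#     tf_model.save_pretrained("local-bert-tf")  # writes tf_model.h5
#     pt_model = BertModel.from_pretrained("local-bert-tf", from_tf=True)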
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union

import transformers
from transformers.testing_utils import require_tf, require_torch, slow


logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Run the doctests of every matching file in `directory`."""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(Path("..") / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_doctests(self):
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_doctests(self):
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration_doctests(self):
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory, identifier=identifier)

    def test_remaining_doctests(self):
        directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
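
# The doctest machinery the class above relies on, in standalone form: a minimal
# sketch showing how DocTestSuite turns a module's doctests into unittest cases
# (the `add` function here is purely illustrative).
import doctest
import sys
import unittest


def add(a, b):
    """
    >>> add(2, 3)
    5
    """
    return a + b


if __name__ == "__main__":
    suite = doctest.DocTestSuite(sys.modules[__name__])  # collect doctests from this module
    result = unittest.TextTestRunner().run(suite)
    assert len(result.failures) == 0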
import argparse
import json
import os

import torch

from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError("Word hidden states do not match the expected values.")

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError("Entity hidden states do not match the expected values.")

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index

    return entity_vocab


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
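
# Typical invocation, sketched with hypothetical local paths and a hypothetical
# script file name (only --model_size has a constrained value set, "base" or "large"):
#
#     python convert_luke_checkpoint.py \
#         --checkpoint_path luke_base/pytorch_model.bin \
#         --metadata_path luke_base/metadata.json \
#         --entity_vocab_path luke_base/entity_vocab.tsv \
#         --pytorch_dump_folder_path converted_luke_base \
#         --model_size base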
import argparse
import collections
import json
from pathlib import Path

import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config


def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys


def remove_unused_keys(state_dict):
    """Remove unused keys (e.g. the auxiliary segmentation head)."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task",
        default="imagenet1k_256",
        type=str,
        help=(
            "Name of the task for which the MobileViTV2 model you'd like to convert is trained on. "
            "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
        ),
        choices=[
            "imagenet1k_256",
            "imagenet1k_384",
            "imagenet21k_to_1k_256",
            "imagenet21k_to_1k_384",
            "ade20k_deeplabv3",
            "voc_deeplabv3",
        ],
    )
    parser.add_argument(
        "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_mobilevitv2_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
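
# What create_rename_keys() produces, on a toy state dict with two hypothetical
# MobileViT-style keys (values are irrelevant to the renaming, so None is used):
_toy_state_dict = {
    "conv_1.block.conv.weight": None,
    "layer_1.0.block.norm.weight": None,
}
for _src, _dest in create_rename_keys(_toy_state_dict):
    print(_src, "->", _dest)
# conv_1.block.conv.weight -> mobilevitv2.conv_stem.convolution.weight
# layer_1.0.block.norm.weight -> mobilevitv2.encoder.layer.0.layer.0.normalization.weight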
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # the deprecated dataset_name field should still appear in the asdict output
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
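
# The round trip under test, distilled into a minimal sketch (the exact layout of
# the YAML list is an internal detail of `datasets`, hence the private methods):
from datasets.splits import SplitDict, SplitInfo

_sd = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
assert SplitDict._from_yaml_list(_sd._to_yaml_list()) == _sd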
import unittest

from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
    from transformers.models.esm.modeling_esm import (
        ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        EsmEmbeddings,
        create_position_ids_from_input_ids,
    )


class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        # The position ids should be masked with the embedding object's padding
        # index, so the first available non-padding position index is padding_idx + 1.
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
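
# The padding-aware position-id scheme the two position-id tests above assert,
# as a self-contained sketch: non-padding tokens count up from padding_idx + 1,
# padding tokens keep padding_idx.
import torch


def _position_ids_sketch(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = torch.cumsum(mask, dim=1) * mask
    return incremental_indices.long() + padding_idx


print(_position_ids_sketch(torch.tensor([[12, 31, 13, 1]]), padding_idx=1))
# tensor([[2, 3, 4, 1]])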
import inspect
import os
import unittest
from dataclasses import dataclass

import torch

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler


@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
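
# The design the first test pins down: a KwargsHandler dataclass only forwards
# fields whose values differ from their defaults. A minimal sketch with a
# hypothetical handler subclass:
from dataclasses import dataclass

from accelerate.utils import KwargsHandler


@dataclass
class ClipKwargs(KwargsHandler):
    max_norm: float = 1.0
    norm_type: float = 2.0


print(ClipKwargs().to_kwargs())              # {}
print(ClipKwargs(max_norm=0.5).to_kwargs())  # {'max_norm': 0.5}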
import inspect
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
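
# What the checkpoint regex matches, on a sample docstring fragment (the sample
# text is illustrative, not taken from a real config class):
import re

_re_checkpoint_demo = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
_sample = "It is based on the [bert-base-uncased](https://huggingface.co/bert-base-uncased) checkpoint."
print(_re_checkpoint_demo.findall(_sample))
# [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]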
def longest_common_substring(text1: str, text2: str) -> str:
    """Return the longest substring that appears in both `text1` and `text2`."""
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
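
# Worked examples: "computer" and "commuter" share "uter" as their longest common
# substring; disjoint strings yield the empty string (dp[i][j] holds the length of
# the common run ending at text1[i - 1] / text2[j - 1]):
assert longest_common_substring("computer", "commuter") == "uter"
assert longest_common_substring("abc", "xyz") == ""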
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}


class SpeechT5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
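
# A minimal usage sketch (SpeechT5Tokenizer is the name this class is exported
# under in transformers; "microsoft/speecht5_asr" is the real checkpoint listed
# in the vocabulary map above):
#
#     from transformers import SpeechT5Tokenizer
#
#     tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
#     ids = tokenizer("hello world").input_ids  # character-level pieces + </s>
#     text = tokenizer.decode(ids, skip_special_tokens=True)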
from ..utils import DummyObject, requires_backends


class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}


class ReformerTokenizer(PreTrainedTokenizer):
    """Reformer tokenizer backed by a SentencePiece model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable: drop it here and reload it in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into SentencePiece subword pieces."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Convert an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Join a sequence of tokens back into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
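
# A hedged usage sketch for the tokenizer above, not part of the library code:
# it assumes `transformers` is installed and that the pretrained SentencePiece
# model can be downloaded, which requires network access.
if __name__ == "__main__":
    from transformers import ReformerTokenizer

    tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
    ids = tok("Crime and Punishment.")["input_ids"]
    print(tok.convert_ids_to_tokens(ids))  # SentencePiece pieces, prefixed with "▁"
    print(tok.decode(ids))  # round-trips (approximately) back to the input text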
# ===== TER metric wrapper around sacrebleu (datasets) =====
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \'num_edits\' (int): The cumulative number of edits\n \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n Example 2:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n Example 3:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n Example 4:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n Example 5:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        # sacrebleu requires a rectangular reference matrix, transposed relative to our input.
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
# ===== Text-generation pipeline tests (transformers) =====
import unittest

from transformers import (
    MODEL_FOR_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_CAUSAL_LM_MAPPING,
    TextGenerationPipeline,
    logging,
    pipeline,
)
from transformers.testing_utils import (
    CaptureLogger,
    is_pipeline_test,
    require_accelerate,
    require_tf,
    require_torch,
    require_torch_gpu,
    require_torch_or_tf,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING

    @require_torch
    def test_small_model_pt(self):
        import torch

        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )

        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")

        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )

    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]

    def test_stop_sequence_stopping_criteria(self):
        prompt = "Hello I believe in"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])

    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)

        # Empty prompt is slightly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return

        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10_000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)

            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # Hole strategy cannot work
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

    @require_torch
    @require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)

    def test_pipeline_length_setting_warning(self):
        prompt = "Hello world"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
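
# A hedged, minimal illustration of the behavior exercised above: prefer
# `max_new_tokens` over `max_length` to avoid the warning tested in the last
# case. Assumes `transformers` with a PyTorch or TF backend and access to the
# tiny test checkpoint.
if __name__ == "__main__":
    from transformers import pipeline

    generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
    out = generator("Hello I believe in", max_new_tokens=5, do_sample=False)
    print(out[0]["generated_text"])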
# ===== DiT class-conditional image-generation pipeline (diffusers) =====
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _a ( lowerCAmelCase):
"""simple docstring"""
def __init__( self : str , __UpperCamelCase : TransformeraDModel , __UpperCamelCase : AutoencoderKL , __UpperCamelCase : KarrasDiffusionSchedulers , __UpperCamelCase : Optional[Dict[int, str]] = None , )->Dict:
super().__init__()
self.register_modules(transformer=__UpperCamelCase , vae=__UpperCamelCase , scheduler=__UpperCamelCase )
# create a imagenet -> id dictionary for easier use
_UpperCAmelCase = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(''',''' ):
_UpperCAmelCase = int(__UpperCamelCase )
_UpperCAmelCase = dict(sorted(self.labels.items() ) )
def lowercase__ ( self : int , __UpperCamelCase : Union[str, List[str]] )->List[int]:
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
_UpperCAmelCase = list(__UpperCamelCase )
for l in label:
if l not in self.labels:
raise ValueError(
F'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self : List[str] , __UpperCamelCase : List[int] , __UpperCamelCase : float = 4.0 , __UpperCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCamelCase : int = 5_0 , __UpperCamelCase : Optional[str] = "pil" , __UpperCamelCase : bool = True , )->Union[ImagePipelineOutput, Tuple]:
_UpperCAmelCase = len(__UpperCamelCase )
_UpperCAmelCase = self.transformer.config.sample_size
_UpperCAmelCase = self.transformer.config.in_channels
_UpperCAmelCase = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=__UpperCamelCase , device=self.device , dtype=self.transformer.dtype , )
_UpperCAmelCase = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
_UpperCAmelCase = torch.tensor(__UpperCamelCase , device=self.device ).reshape(-1 )
_UpperCAmelCase = torch.tensor([1_0_0_0] * batch_size , device=self.device )
_UpperCAmelCase = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(__UpperCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
_UpperCAmelCase = latent_model_input[: len(__UpperCamelCase ) // 2]
_UpperCAmelCase = torch.cat([half, half] , dim=0 )
_UpperCAmelCase = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = t
if not torch.is_tensor(__UpperCamelCase ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
_UpperCAmelCase = latent_model_input.device.type == '''mps'''
if isinstance(__UpperCamelCase , __UpperCamelCase ):
_UpperCAmelCase = torch.floataa if is_mps else torch.floataa
else:
_UpperCAmelCase = torch.intaa if is_mps else torch.intaa
_UpperCAmelCase = torch.tensor([timesteps] , dtype=__UpperCamelCase , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
_UpperCAmelCase = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_UpperCAmelCase = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
_UpperCAmelCase = self.transformer(
__UpperCamelCase , timestep=__UpperCamelCase , class_labels=__UpperCamelCase ).sample
# perform guidance
if guidance_scale > 1:
_UpperCAmelCase , _UpperCAmelCase = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
_UpperCAmelCase , _UpperCAmelCase = torch.split(__UpperCamelCase , len(__UpperCamelCase ) // 2 , dim=0 )
_UpperCAmelCase = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
_UpperCAmelCase = torch.cat([half_eps, half_eps] , dim=0 )
_UpperCAmelCase = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
_UpperCAmelCase , _UpperCAmelCase = torch.split(__UpperCamelCase , __UpperCamelCase , dim=1 )
else:
_UpperCAmelCase = noise_pred
# compute previous image: x_t -> x_t-1
_UpperCAmelCase = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
if guidance_scale > 1:
_UpperCAmelCase , _UpperCAmelCase = latent_model_input.chunk(2 , dim=0 )
else:
_UpperCAmelCase = latent_model_input
_UpperCAmelCase = 1 / self.vae.config.scaling_factor * latents
_UpperCAmelCase = self.vae.decode(__UpperCamelCase ).sample
_UpperCAmelCase = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_UpperCAmelCase = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_UpperCAmelCase = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=__UpperCamelCase )
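
# A hedged usage sketch for the pipeline above. The checkpoint name and GPU
# availability are assumptions for illustration; DiT weights are published on
# the Hub as "facebook/DiT-XL-2-256", but any compatible checkpoint works.
if __name__ == "__main__":
    from diffusers import DiTPipeline

    pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    class_ids = pipe.get_label_ids(["golden retriever"])  # human label -> ImageNet class id
    image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]
    image.save("dit_sample.png")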
# ===== Monte Carlo estimators for pi and definite integrals =====
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
def is_in_circle(_SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float ) -> bool:
_UpperCAmelCase = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
_UpperCAmelCase = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(_SCREAMING_SNAKE_CASE ) )
# The ratio of the area for circle to square is pi/4.
_UpperCAmelCase = proportion * 4
print(f'The estimated value of pi is {pi_estimate}' )
print(f'The numpy value of pi is {pi}' )
print(f'The total error is {abs(pi - pi_estimate )}' )
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Callable[[float], float] , _SCREAMING_SNAKE_CASE : float = 0.0 , _SCREAMING_SNAKE_CASE : float = 1.0 , ):
'''simple docstring'''
return mean(
function_to_integrate(uniform(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) for _ in range(_SCREAMING_SNAKE_CASE ) ) * (max_value - min_value)
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : float = 0.0 , _SCREAMING_SNAKE_CASE : float = 1.0 ):
'''simple docstring'''
def identity_function(_SCREAMING_SNAKE_CASE : float ) -> float:
return x
_UpperCAmelCase = area_under_curve_estimator(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = (max_value * max_value - min_value * min_value) / 2
print('''******************''' )
print(f'Estimating area under y=x where x varies from {min_value} to {max_value}' )
print(f'Estimated value is {estimated_value}' )
print(f'Expected value is {expected_value}' )
print(f'Total error is {abs(estimated_value - expected_value )}' )
print('''******************''' )
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
def function_to_integrate(_SCREAMING_SNAKE_CASE : float ) -> float:
return sqrt(4.0 - x * x )
_UpperCAmelCase = area_under_curve_estimator(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0.0 , 2.0 )
print('''******************''' )
print('''Estimating pi using area_under_curve_estimator''' )
print(f'Estimated value is {estimated_value}' )
print(f'Expected value is {pi}' )
print(f'Total error is {abs(estimated_value - pi )}' )
print('''******************''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
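
# A quick demo of the estimators above (names as defined in this file);
# Monte Carlo error shrinks roughly as 1/sqrt(n), so expect only a few
# correct digits at this sample size.
if __name__ == "__main__":
    pi_estimator(100_000)
    area_under_line_estimator_check(100_000)
    pi_estimator_using_area_under_curve(100_000)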
# ===== TVLT audio feature extractor (transformers) =====
from math import ceil
from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform):
        # Log-mel spectrogram in dB, rescaled into [-1, 1].
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
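
# Hedged usage sketch for the extractor above: one second of random mono audio
# at the default 44.1 kHz rate. The public class name (TvltFeatureExtractor)
# and default shapes are assumptions drawn from the code, not from this file's
# own documentation.
if __name__ == "__main__":
    from transformers import TvltFeatureExtractor

    extractor = TvltFeatureExtractor()
    waveform = np.random.randn(44100).astype(np.float32)
    features = extractor(waveform, sampling_rate=44100, return_attention_mask=True)
    print(features["audio_values"].shape, features["audio_mask"].shape)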
# ===== Gauss's Easter-date algorithm =====
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """Calculate the Gregorian Easter date for a given year using Gauss's method."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
# ===== Dummy objects for the optional keras_nlp backend (transformers) =====
from ..utils import DummyObject, requires_backends


class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
# ===== Funnel Transformer configuration (transformers) =====
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
    "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
    "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
    "funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
    "funnel-transformer/intermediate": (
        "https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
    ),
    "funnel-transformer/intermediate-base": (
        "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
    ),
    "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
    "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
    "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
    "funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}


class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
# ===== Pegasus fast tokenizer (transformers) =====
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}


class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
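
# Hedged usage sketch: the fast Pegasus tokenizer appends </s> and reserves
# <mask_1>/<mask_2> plus <unk_2>..<unk_102> as additional special tokens.
# Requires network access to fetch the pretrained files.
if __name__ == "__main__":
    from transformers import PegasusTokenizerFast

    tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
    enc = tok("PEGASUS is a summarization model.")
    print(enc["input_ids"][-1] == tok.eos_token_id)  # True: EOS is appended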
# ===== Seq2SeqTrainer integration test with an EncoderDecoder model (transformers) =====
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            # Replace pad tokens with -100 so the loss ignores them.
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
# ===== BioGPT lazy import module (transformers) =====
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase__ : List[Any] = {
"""configuration_biogpt""": ["""BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BioGptConfig"""],
"""tokenization_biogpt""": ["""BioGptTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Dict = [
"""BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BioGptForCausalLM""",
"""BioGptForTokenClassification""",
"""BioGptForSequenceClassification""",
"""BioGptModel""",
"""BioGptPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
lowercase__ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
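
# What the lazy module buys you, sketched as a hedged example: importing the
# package is cheap, and the heavy modeling submodule loads only when one of its
# attributes is first touched.
#
#   import transformers.models.biogpt as biogpt   # fast: nothing heavy imported yet
#   config_cls = biogpt.BioGptConfig              # triggers the real submodule import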
# ===== VQModel unit tests (diffusers) =====
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class UpperCamelCase__ ( lowercase_, lowercase_, unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = VQModel
_SCREAMING_SNAKE_CASE = """sample"""
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any]=(3_2, 3_2) ):
lowerCAmelCase_ : Tuple = 4
lowerCAmelCase_ : Optional[Any] = 3
lowerCAmelCase_ : int = floats_tensor((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE_ )
return {"sample": image}
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
return (3, 3_2, 3_2)
@property
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
return (3, 3_2, 3_2)
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
lowerCAmelCase_ : Union[str, Any] = {
'block_out_channels': [3_2, 6_4],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 3,
}
lowerCAmelCase_ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
pass
def SCREAMING_SNAKE_CASE__ ( self : str ):
pass
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
lowerCAmelCase_ ,lowerCAmelCase_ : Optional[Any] = VQModel.from_pretrained('fusing/vqgan-dummy' , output_loading_info=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Any = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
lowerCAmelCase_ : Any = VQModel.from_pretrained('fusing/vqgan-dummy' )
model.to(SCREAMING_SNAKE_CASE_ ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
lowerCAmelCase_ : int = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
lowerCAmelCase_ : Dict = image.to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
lowerCAmelCase_ : int = model(SCREAMING_SNAKE_CASE_ ).sample
lowerCAmelCase_ : Union[str, Any] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowerCAmelCase_ : int = torch.tensor([-0.01_53, -0.40_44, -0.18_80, -0.51_61, -0.24_18, -0.40_72, -0.16_12, -0.06_33, -0.01_43] )
# fmt: on
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
# ===== Conway's Game of Life with GIF export =====
from __future__ import annotations

from PIL import Image

# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def new_generation(cells: list[list[int]]) -> list[list[int]]:
    """Apply one step of Conway's Game of Life rules to the grid."""
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """Generate images of `frames` successive generations starting from `cells`."""
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
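
# A small console demo of the rules above using the blinker oscillator: it
# should alternate between a vertical and a horizontal bar every generation.
if __name__ == "__main__":
    grid = BLINKER
    for _ in range(3):
        print(*grid, sep="\n")
        print("---")
        grid = new_generation(grid)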
# ===== Caesar-cipher cracking via the chi-squared statistic =====
from __future__ import annotations


def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """Break a Caesar cipher by scoring every shift with the chi-squared test."""
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
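
# Demo for the cracker above: the input is "the quick brown fox jumps over the
# lazy dog" Caesar-shifted by 3, so the lowest chi-squared score should recover
# shift 3 and the original sentence.
if __name__ == "__main__":
    shift, chi2, decoded = decrypt_caesar_with_chi_squared(
        "wkh txlfn eurzq ira mxpsv ryhu wkh odcb grj"
    )
    print(shift, round(chi2, 2), decoded)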
# ===== EfficientNet lazy import module (transformers) =====
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['image_processing_efficientnet'] = ['EfficientNetImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_efficientnet'] = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
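# Hedged note on the lazy-import pattern above: _LazyModule only records the
# names in _import_structure and imports the real submodule on first attribute
# access, so
#
#     from transformers.models.efficientnet import EfficientNetConfig
#
# stays cheap at package-import time, and names backed by optional torch/vision
# dependencies are registered only when those backends are installed.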
| 407
|
from functools import lru_cache
@lru_cache
def factorial( num : int ):
if num < 0:
raise ValueError('''Number should not be negative.''' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
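# Hedged sanity check on known values (0! == 1 and 5! == 120); thanks to
# @lru_cache, repeating a call is a dictionary lookup rather than a fresh
# recursion:
#
#     assert factorial(0) == 1
#     assert factorial(5) == 120
#     factorial(5)  # served from the cache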
| 407
| 1
|
'''simple docstring'''
import json
import sys
def format_json_to_md( input_json_file , output_md_file ) -> None:
'''simple docstring'''
with open(input_json_file , encoding='utf-8' ) as f:
results = json.load(f )
output_md = ['<details>', '<summary>Show updated benchmarks!</summary>', ' ']
for benchmark_name in sorted(results ):
benchmark_res = results[benchmark_name]
benchmark_file_name = benchmark_name.split('/' )[-1]
output_md.append(F'''### Benchmark: {benchmark_file_name}''' )
title = '| metric |'
lines = '|--------|'
value = '| new / old (diff) |'
for metric_name in sorted(benchmark_res ):
metric_vals = benchmark_res[metric_name]
new_val = metric_vals['new']
old_val = metric_vals.get('old' , None )
dif_val = metric_vals.get('diff' , None )
val_str = F''' {new_val:f}''' if isinstance(new_val , (int, float) ) else 'None'
if old_val is not None:
val_str += F''' / {old_val:f}''' if isinstance(old_val , (int, float) ) else "None"
if dif_val is not None:
val_str += F''' ({dif_val:f})''' if isinstance(dif_val , (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append('</details>' )
with open(output_md_file , 'w' , encoding='utf-8' ) as f:
f.writelines('\n'.join(output_md ) )
if __name__ == "__main__":
UpperCamelCase_ = sys.argv[1]
UpperCamelCase_ = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
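# Hedged example of the expected input (shape inferred from the parsing code
# above; the file name and numbers are illustrative only):
#
#     {
#       "benchmarks/inference.json": {
#         "latency_ms": {"new": 12.3, "old": 11.9, "diff": 0.4}
#       }
#     }
#
# which renders as one "### Benchmark: inference.json" section containing a
# "| metric |" table with one "new / old (diff)" column per metric.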
| 209
|
'''simple docstring'''
def encrypt( input_string : str , key : int ) -> str:
'''simple docstring'''
temp_grid: list[list[str]] = [[] for _ in range(key )]
lowest = key - 1
if key <= 0:
raise ValueError('Height of grid can\'t be 0 or negative' )
if key == 1 or len(input_string ) <= key:
return input_string
for position, character in enumerate(input_string ):
num = position % (lowest * 2) # puts it in bounds
num = min(num , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(character )
grid = [''.join(row ) for row in temp_grid]
output_string = ''.join(grid )
return output_string
def decrypt( input_string : str , key : int ) -> str:
'''simple docstring'''
grid = []
lowest = key - 1
if key <= 0:
raise ValueError('Height of grid can\'t be 0 or negative' )
if key == 1:
return input_string
temp_grid: list[list[str]] = [[] for _ in range(key )] # generates template
for position in range(len(input_string ) ):
num = position % (lowest * 2) # puts it in bounds
num = min(num , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append('*' )
counter = 0
for row in temp_grid: # fills in the characters
splice = input_string[counter : counter + len(row )]
grid.append(list(splice ) )
counter += len(splice )
output_string = '' # reads as zigzag
for position in range(len(input_string ) ):
num = position % (lowest * 2) # puts it in bounds
num = min(num , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def bruteforce( input_string : str ) -> dict[int, str]:
'''simple docstring'''
results = {}
for key_guess in range(1 , len(input_string ) ): # tries every key
results[key_guess] = decrypt(input_string , key_guess )
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
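# Hedged round-trip check: for any 1 < key < len(message), decrypt should
# invert encrypt, so the following property holds without assuming any
# particular ciphertext:
#
#     message = 'WE ARE DISCOVERED. FLEE AT ONCE'
#     for key in range(2, len(message)):
#         assert decrypt(encrypt(message, key), key) == message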
| 209
| 1
|
def print_pascal_triangle( num_rows ) -> None:
'''simple docstring'''
triangle = generate_pascal_triangle(num_rows )
for row_idx in range(num_rows ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=' ' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=' ' )
else:
print(triangle[row_idx][col_idx] , end='' )
print()
def generate_pascal_triangle( num_rows ) -> list[list[int]]:
'''simple docstring'''
if not isinstance(num_rows , int ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
triangle = []
for current_row_idx in range(num_rows ):
current_row = populate_current_row(triangle , current_row_idx )
triangle.append(current_row )
return triangle
def populate_current_row( triangle , current_row_idx ) -> list[int]:
'''simple docstring'''
current_row = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
current_row[0] , current_row[-1] = 1, 1
for current_col_idx in range(1 , current_row_idx ):
calculate_current_element(
triangle , current_row , current_row_idx , current_col_idx )
return current_row
def calculate_current_element( triangle , current_row , current_row_idx , current_col_idx , ) -> None:
'''simple docstring'''
above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized( num_rows ) -> list[list[int]]:
'''simple docstring'''
if not isinstance(num_rows , int ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
result = [[1]]
for row_index in range(1 , num_rows ):
temp_row = [0] + result[-1] + [0]
row_length = row_index + 1
# Calculate the number of distinct elements in a row
distinct_elements = sum(divmod(row_length , 2 ) )
row_first_half = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
row_second_half = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
row = row_first_half + row_second_half
result.append(row )
return result
def benchmark() -> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(func , value ) -> None:
call = F'''{func.__name__}({value})'''
timing = timeit(F'''__main__.{call}''' , setup='import __main__' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F'''{call:38} -- {timing:.4f} seconds''' )
for value in range(1_5 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(func , value )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
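# Known-value example (standard Pascal's triangle, a safe fact):
#
#     generate_pascal_triangle(5)
#     # [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]
#
# generate_pascal_triangle_optimized(5) should return the same rows while
# computing only half of each row and mirroring it, which is the symmetry
# trick the benchmark above measures.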
| 712
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
def __init__( self , data ):
"""simple docstring"""
self.data = data
self.h = [0X6745_2301, 0XEFCD_AB89, 0X98BA_DCFE, 0X1032_5476, 0XC3D2_E1F0]
@staticmethod
def rotate( n , b ):
"""simple docstring"""
return ((n << b) | (n >> (32 - b))) & 0XFFFF_FFFF
def padding( self ):
"""simple docstring"""
padding = B'\x80' + B'\x00' * (63 - (len(self.data ) + 8) % 64)
padded_data = self.data + padding + struct.pack('>Q' , 8 * len(self.data ) )
return padded_data
def split_blocks( self ):
"""simple docstring"""
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def expand_block( self , block ):
"""simple docstring"""
w = list(struct.unpack('>16L' , block ) ) + [0] * 64
for i in range(16 , 80 ):
w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def final_hash( self ):
"""simple docstring"""
self.padded_data = self.padding()
self.blocks = self.split_blocks()
for block in self.blocks:
expanded_block = self.expand_block(block )
a , b , c , d , e = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
f = (b & c) | ((~b) & d)
k = 0X5A82_7999
elif 20 <= i < 40:
f = b ^ c ^ d
k = 0X6ED9_EBA1
elif 40 <= i < 60:
f = (b & c) | (b & d) | (c & d)
k = 0X8F1B_BCDC
elif 60 <= i < 80:
f = b ^ c ^ d
k = 0XCA62_C1D6
a , b , c , d , e = (
self.rotate(a , 5 ) + f + e + k + expanded_block[i] & 0XFFFF_FFFF,
a,
self.rotate(b , 30 ),
c,
d,
)
self.h = (
self.h[0] + a & 0XFFFF_FFFF,
self.h[1] + b & 0XFFFF_FFFF,
self.h[2] + c & 0XFFFF_FFFF,
self.h[3] + d & 0XFFFF_FFFF,
self.h[4] + e & 0XFFFF_FFFF,
)
return ("{:08x}" * 5).format(*self.h )
return ("{:08x}" * 5).format(*self.h )
def test_sha1_hash():
'''simple docstring'''
data = B'Test String'
assert SHA1Hash(data ).final_hash() == hashlib.sha1(data ).hexdigest() # noqa: S324
def main():
'''simple docstring'''
parser = argparse.ArgumentParser(description='Process some strings or files' )
parser.add_argument(
'--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument('--file' , dest='input_file' , help='Hash contents of a file' )
args = parser.parse_args()
input_string = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
hash_input = f.read()
else:
hash_input = bytes(input_string , 'utf-8' )
print(SHA1Hash(hash_input ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
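# Hedged cross-check against the standard library (the SHA-1 of b'abc' is the
# well-known digest a9993e364706816aba3e25717850c26c9cd0d89d):
#
#     import hashlib
#     msg = b'abc'
#     assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()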
| 559
| 0
|
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
df = pd.read_csv('sample_data.csv', header=None)
len_data = df.shape[:1][0]
# If you're using some other dataset input the target column
actual_data = df.iloc[:, 1:2]
actual_data = actual_data.values.reshape(len_data, 1)
actual_data = MinMaxScaler().fit_transform(actual_data)
look_back = 10
forward_days = 5
periods = 20
division = len_data - periods * look_back
train_data = actual_data[:division]
test_data = actual_data[division - look_back :]
train_x, train_y = [], []
test_x, test_y = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
x_train = np.array(train_x)
x_test = np.array(test_x)
y_train = np.array([list(i.ravel()) for i in train_y])
y_test = np.array([list(i.ravel()) for i in test_y])
model = Sequential()
model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(1_28, 1)))
model.add(Dense(forward_days))
model.compile(loss='mean_squared_error', optimizer='adam')
history = model.fit(
x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4
)
pred = model.predict(x_test)
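# Hedged shape note (the exact counts depend on the length of sample_data.csv):
# each x_train sample is a sliding (look_back, 1) window and each y_train row
# holds the following forward_days values, which is why the network ends in
# Dense(forward_days):
#
#     # x_train.shape == (num_windows, look_back, 1)
#     # y_train.shape == (num_windows, forward_days)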
| 49
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''feature_extraction_yolos'''] = ['''YolosFeatureExtractor''']
_import_structure['''image_processing_yolos'''] = ['''YolosImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_yolos'''] = [
'''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''YolosForObjectDetection''',
'''YolosModel''',
'''YolosPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 593
| 0
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class a ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
@register_to_config
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = False , ) -> Tuple:
super().__init__()
_A = nn.Embedding(lowerCAmelCase_ , lowerCAmelCase_ )
_A = nn.Embedding(lowerCAmelCase_ , lowerCAmelCase_ )
_A = False
_A = nn.Dropout(p=lowerCAmelCase_ )
_A = TaConfig(
vocab_size=lowerCAmelCase_ , d_model=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , feed_forward_proj=lowerCAmelCase_ , is_decoder=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , )
_A = nn.ModuleList()
for lyr_num in range(lowerCAmelCase_ ):
_A = TaBlock(lowerCAmelCase_ )
self.encoders.append(lowerCAmelCase_ )
_A = TaLayerNorm(lowerCAmelCase_ )
_A = nn.Dropout(p=lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_A = self.token_embedder(lowerCAmelCase_ )
_A = encoder_input_tokens.shape[1]
_A = torch.arange(lowerCAmelCase_ , device=encoder_input_tokens.device )
x += self.position_encoding(lowerCAmelCase_ )
_A = self.dropout_pre(lowerCAmelCase_ )
# inverted the attention mask
_A = encoder_input_tokens.size()
_A = self.get_extended_attention_mask(lowerCAmelCase_ , lowerCAmelCase_ )
for lyr in self.encoders:
_A = lyr(lowerCAmelCase_ , lowerCAmelCase_ )[0]
_A = self.layer_norm(lowerCAmelCase_ )
return self.dropout_post(lowerCAmelCase_ ), encoder_inputs_mask
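# Hedged shape note (inferred from the forward pass above): for input tokens of
# shape (batch, seq_len), the module returns a (batch, seq_len, d_model)
# hidden-state tensor plus the untouched encoder_inputs_mask, which is what a
# downstream decoder cross-attends over.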
| 83
|
import colorsys
from PIL import Image # type: ignore
def get_distance( x :float , y :float , max_step :int) -> float:
a = x
b = y
for step in range(max_step): # noqa: B007
a_new = a * a - b * b + x
b = 2 * a * b + y
a = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def get_black_and_white_rgb( distance :float) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def get_color_coded_rgb( distance :float) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance , 1 , 1))
def get_image( image_width :int = 800 , image_height :int = 600 , figure_center_x :float = -0.6 , figure_center_y :float = 0 , figure_width :float = 3.2 , max_step :int = 50 , use_distance_color_coding :bool = True , ) -> Image.Image:
img = Image.new("""RGB""" , (image_width, image_height))
pixels = img.load()
# loop through the image-coordinates
for image_x in range(image_width):
for image_y in range(image_height):
# determine the figure-coordinates based on the image-coordinates
figure_height = figure_width / image_width * image_height
figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
distance = get_distance(figure_x , figure_y , max_step)
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
pixels[image_x, image_y] = get_color_coded_rgb(distance)
else:
pixels[image_x, image_y] = get_black_and_white_rgb(distance)
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
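# Hedged math note: get_distance iterates z <- z**2 + c with z = a + b*i and
# c = x + y*i, i.e.
#     a_new = a*a - b*b + x
#     b_new = 2*a*b + y
# and stops once a*a + b*b > 4, since |z| > 2 guarantees divergence; the
# returned step / (max_step - 1) is the normalized escape time that drives the
# hue in get_color_coded_rgb.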
| 83
| 1
|
"""simple docstring"""
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f'''{len(upper_files)} files contain uppercase characters:''')
print('\n'.join(upper_files) + '\n')
space_files = [file for file in filepaths if ' ' in file]
if space_files:
print(f'''{len(space_files)} files contain space characters:''')
print('\n'.join(space_files) + '\n')
hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
print(f'''{len(hyphen_files)} files contain hyphen characters:''')
print('\n'.join(hyphen_files) + '\n')
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f'''{len(nodir_files)} files are not in a directory:''')
print('\n'.join(nodir_files) + '\n')
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
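# Hedged example of what this check flags (the path is hypothetical): a file
# named "ciphers/Caesar Cipher.py" would land in both upper_files and
# space_files, and the script would then exit with a nonzero status equal to
# the total bad count, which is what makes it usable as a CI lint step.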
| 265
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_llama'] = ['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_llama_fast'] = ['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_llama'] = [
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 265
| 1
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {'vocab_file': 'spiece.model'}
__lowerCamelCase = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
__lowerCamelCase = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = 2
__lowerCamelCase = 3
__lowerCamelCase = 4
class _UpperCamelCase( lowercase__ ):
__A: Tuple = VOCAB_FILES_NAMES
__A: List[Any] = PRETRAINED_VOCAB_FILES_MAP
__A: Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A: Optional[Any] = """left"""
def __init__( self : Tuple , _lowerCamelCase : List[Any] , _lowerCamelCase : List[Any]=False , _lowerCamelCase : Dict=True , _lowerCamelCase : List[str]=False , _lowerCamelCase : List[str]="<s>" , _lowerCamelCase : Any="</s>" , _lowerCamelCase : Optional[int]="<unk>" , _lowerCamelCase : Tuple="<sep>" , _lowerCamelCase : Tuple="<pad>" , _lowerCamelCase : Optional[int]="<cls>" , _lowerCamelCase : Dict="<mask>" , _lowerCamelCase : Union[str, Any]=["<eop>", "<eod>"] , _lowerCamelCase : Optional[Dict[str, Any]] = None , **_lowerCamelCase : Dict , ):
_UpperCAmelCase : Dict = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token
_UpperCAmelCase : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCAmelCase__ , remove_space=UpperCAmelCase__ , keep_accents=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , )
_UpperCAmelCase : Optional[Any] = 3
_UpperCAmelCase : int = do_lower_case
_UpperCAmelCase : Tuple = remove_space
_UpperCAmelCase : List[Any] = keep_accents
_UpperCAmelCase : Tuple = vocab_file
_UpperCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase__ )
@property
def a__ ( self : Tuple ):
return len(self.sp_model )
def a__ ( self : Optional[Any] ):
_UpperCAmelCase : List[Any] = {self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[Any] ):
_UpperCAmelCase : int = self.__dict__.copy()
_UpperCAmelCase : List[Any] = None
return state
def __setstate__( self : Any , _lowerCamelCase : List[str] ):
_UpperCAmelCase : int = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_UpperCAmelCase : Dict = {}
_UpperCAmelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a__ ( self : str , _lowerCamelCase : Optional[Any] ):
if self.remove_space:
_UpperCAmelCase : str = ''' '''.join(inputs.strip().split() )
else:
_UpperCAmelCase : int = inputs
_UpperCAmelCase : List[Any] = outputs.replace("``" , "\"" ).replace("\'\'" , "\"" )
if not self.keep_accents:
_UpperCAmelCase : Any = unicodedata.normalize("NFKD" , UpperCAmelCase__ )
_UpperCAmelCase : int = ''''''.join([c for c in outputs if not unicodedata.combining(UpperCAmelCase__ )] )
if self.do_lower_case:
_UpperCAmelCase : int = outputs.lower()
return outputs
def a__ ( self : Optional[Any] , _lowerCamelCase : str ):
_UpperCAmelCase : List[str] = self.preprocess_text(UpperCAmelCase__ )
_UpperCAmelCase : Dict = self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ )
_UpperCAmelCase : List[str] = []
for piece in pieces:
if len(UpperCAmelCase__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
_UpperCAmelCase : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCAmelCase__ , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_UpperCAmelCase : List[Any] = cur_pieces[1:]
else:
_UpperCAmelCase : List[Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCAmelCase__ )
else:
new_pieces.append(UpperCAmelCase__ )
return new_pieces
def a__ ( self : List[Any] , _lowerCamelCase : Optional[int] ):
return self.sp_model.PieceToId(UpperCAmelCase__ )
def a__ ( self : Tuple , _lowerCamelCase : Dict ):
return self.sp_model.IdToPiece(UpperCAmelCase__ )
def a__ ( self : Union[str, Any] , _lowerCamelCase : str ):
_UpperCAmelCase : str = ''''''.join(UpperCAmelCase__ ).replace(UpperCAmelCase__ , " " ).strip()
return out_string
def a__ ( self : Optional[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : bool = False , _lowerCamelCase : bool = None , _lowerCamelCase : bool = True , **_lowerCamelCase : Union[str, Any] , ):
_UpperCAmelCase : Optional[int] = kwargs.pop("use_source_tokenizer" , UpperCAmelCase__ )
_UpperCAmelCase : Any = self.convert_ids_to_tokens(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
# To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_UpperCAmelCase : Dict = []
_UpperCAmelCase : Optional[Any] = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCAmelCase__ ) )
_UpperCAmelCase : List[Any] = []
sub_texts.append(UpperCAmelCase__ )
else:
current_sub_text.append(UpperCAmelCase__ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCAmelCase__ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_UpperCAmelCase : Any = ''''''.join(UpperCAmelCase__ )
_UpperCAmelCase : str = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_UpperCAmelCase : str = self.clean_up_tokenization(UpperCAmelCase__ )
return clean_text
else:
return text
def a__ ( self : str , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ):
_UpperCAmelCase : Dict = [self.sep_token_id]
_UpperCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def a__ ( self : str , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )
if token_ids_a is not None:
return ([0] * len(UpperCAmelCase__ )) + [1] + ([0] * len(UpperCAmelCase__ )) + [1, 1]
return ([0] * len(UpperCAmelCase__ )) + [1, 1]
def a__ ( self : Union[str, Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ):
_UpperCAmelCase : Optional[Any] = [self.sep_token_id]
_UpperCAmelCase : Tuple = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def a__ ( self : Any , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ):
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCAmelCase : Optional[Any] = os.path.join(
UpperCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase__ , "wb" ) as fi:
_UpperCAmelCase : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__ )
return (out_vocab_file,)
| 700
|
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__lowerCamelCase = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict( config , input_ids , decoder_input_ids=None , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
"""simple docstring"""
if attention_mask is None:
_UpperCAmelCase : Tuple = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
_UpperCAmelCase : Any = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
_UpperCAmelCase : Tuple = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_UpperCAmelCase : Dict = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_UpperCAmelCase : str = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class FlaxBlenderbotModelTester:
def __init__( self : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Tuple=13 , _lowerCamelCase : Optional[Any]=7 , _lowerCamelCase : Dict=True , _lowerCamelCase : Tuple=False , _lowerCamelCase : Optional[int]=99 , _lowerCamelCase : Dict=16 , _lowerCamelCase : str=2 , _lowerCamelCase : Any=4 , _lowerCamelCase : List[str]=4 , _lowerCamelCase : List[str]="gelu" , _lowerCamelCase : List[Any]=0.1 , _lowerCamelCase : Any=0.1 , _lowerCamelCase : List[Any]=32 , _lowerCamelCase : Tuple=2 , _lowerCamelCase : Optional[Any]=1 , _lowerCamelCase : Dict=0 , _lowerCamelCase : str=0.02 , ):
_UpperCAmelCase : List[str] = parent
_UpperCAmelCase : Tuple = batch_size
_UpperCAmelCase : Dict = seq_length
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : int = use_labels
_UpperCAmelCase : Union[str, Any] = vocab_size
_UpperCAmelCase : Optional[Any] = hidden_size
_UpperCAmelCase : Union[str, Any] = num_hidden_layers
_UpperCAmelCase : Optional[Any] = num_attention_heads
_UpperCAmelCase : List[Any] = intermediate_size
_UpperCAmelCase : List[str] = hidden_act
_UpperCAmelCase : Optional[int] = hidden_dropout_prob
_UpperCAmelCase : str = attention_probs_dropout_prob
_UpperCAmelCase : int = max_position_embeddings
_UpperCAmelCase : str = eos_token_id
_UpperCAmelCase : Any = pad_token_id
_UpperCAmelCase : Optional[Any] = bos_token_id
_UpperCAmelCase : Optional[int] = initializer_range
def a__ ( self : Any ):
_UpperCAmelCase : str = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
_UpperCAmelCase : Optional[Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
_UpperCAmelCase : Union[str, Any] = shift_tokens_right(_lowerCamelCase , 1 , 2 )
_UpperCAmelCase : Optional[int] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_lowerCamelCase , )
_UpperCAmelCase : str = prepare_blenderbot_inputs_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return config, inputs_dict
def a__ ( self : int ):
_UpperCAmelCase ,_UpperCAmelCase : Any = self.prepare_config_and_inputs()
return config, inputs_dict
def a__ ( self : Union[str, Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple ):
_UpperCAmelCase : Dict = 20
_UpperCAmelCase : List[Any] = model_class_name(_lowerCamelCase )
_UpperCAmelCase : Tuple = model.encode(inputs_dict["input_ids"] )
_UpperCAmelCase ,_UpperCAmelCase : Optional[Any] = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
_UpperCAmelCase : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , _lowerCamelCase , _lowerCamelCase )
_UpperCAmelCase : List[str] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
_UpperCAmelCase : Dict = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCAmelCase : str = model.decode(
decoder_input_ids[:, :-1] , _lowerCamelCase , decoder_attention_mask=_lowerCamelCase , past_key_values=_lowerCamelCase , decoder_position_ids=_lowerCamelCase , )
_UpperCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
_UpperCAmelCase : Dict = model.decode(
decoder_input_ids[:, -1:] , _lowerCamelCase , decoder_attention_mask=_lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_lowerCamelCase , )
_UpperCAmelCase : List[str] = model.decode(_lowerCamelCase , _lowerCamelCase )
_UpperCAmelCase : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
def a__ ( self : Any , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : List[Any] ):
_UpperCAmelCase : int = 20
_UpperCAmelCase : str = model_class_name(_lowerCamelCase )
_UpperCAmelCase : List[str] = model.encode(inputs_dict["input_ids"] )
_UpperCAmelCase ,_UpperCAmelCase : int = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
_UpperCAmelCase : int = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_UpperCAmelCase : int = model.init_cache(decoder_input_ids.shape[0] , _lowerCamelCase , _lowerCamelCase )
_UpperCAmelCase : Union[str, Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCAmelCase : Optional[int] = model.decode(
decoder_input_ids[:, :-1] , _lowerCamelCase , decoder_attention_mask=_lowerCamelCase , past_key_values=_lowerCamelCase , decoder_position_ids=_lowerCamelCase , )
_UpperCAmelCase : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
_UpperCAmelCase : Union[str, Any] = model.decode(
decoder_input_ids[:, -1:] , _lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_lowerCamelCase , decoder_position_ids=_lowerCamelCase , )
_UpperCAmelCase : List[Any] = model.decode(_lowerCamelCase , _lowerCamelCase , decoder_attention_mask=_lowerCamelCase )
_UpperCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class BlenderbotHeadTests(unittest.TestCase ):
vocab_size = 99
def a__ ( self : Optional[Any] ):
_UpperCAmelCase : str = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
_UpperCAmelCase : Any = input_ids.shape[0]
_UpperCAmelCase : Dict = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def a__ ( self : int ):
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : List[str] = self._get_config_and_data()
_UpperCAmelCase : Tuple = FlaxBlenderbotForConditionalGeneration(_lowerCamelCase )
_UpperCAmelCase : Dict = lm_model(input_ids=_lowerCamelCase )
_UpperCAmelCase : Any = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape , _lowerCamelCase )
def a__ ( self : Union[str, Any] ):
_UpperCAmelCase : List[str] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
_UpperCAmelCase : List[str] = FlaxBlenderbotForConditionalGeneration(_lowerCamelCase )
_UpperCAmelCase : Union[str, Any] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
_UpperCAmelCase : Optional[Any] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
_UpperCAmelCase : str = lm_model(input_ids=_lowerCamelCase , decoder_input_ids=_lowerCamelCase )
_UpperCAmelCase : int = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape , _lowerCamelCase )
def a__ ( self : List[Any] ):
_UpperCAmelCase : List[str] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
_UpperCAmelCase : Optional[int] = shift_tokens_right(_lowerCamelCase , 1 , 2 )
_UpperCAmelCase : List[str] = np.equal(_lowerCamelCase , 1 ).astype(np.floataa ).sum()
_UpperCAmelCase : Optional[int] = np.equal(_lowerCamelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_lowerCamelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
is_encoder_decoder = True
all_model_classes = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def a__ ( self : List[str] ):
self.model_tester = FlaxBlenderbotModelTester(self )
def a__ ( self : Tuple ):
_UpperCAmelCase ,_UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def a__ ( self : List[Any] ):
_UpperCAmelCase ,_UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def a__ ( self : Dict ):
_UpperCAmelCase ,_UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase : Any = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
_UpperCAmelCase : Optional[Any] = model_class(_lowerCamelCase )
@jax.jit
def encode_jitted(_lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any]=None , **_lowerCamelCase : Tuple ):
return model.encode(input_ids=_lowerCamelCase , attention_mask=_lowerCamelCase )
with self.subTest("JIT Enabled" ):
_UpperCAmelCase : Optional[int] = encode_jitted(**_lowerCamelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_UpperCAmelCase : Optional[Any] = encode_jitted(**_lowerCamelCase ).to_tuple()
self.assertEqual(len(_lowerCamelCase ) , len(_lowerCamelCase ) )
for jitted_output, output in zip(_lowerCamelCase , _lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def a__ ( self : List[str] ):
_UpperCAmelCase ,_UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase : int = model_class(_lowerCamelCase )
_UpperCAmelCase : int = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
_UpperCAmelCase : Optional[int] = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(_lowerCamelCase : int , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str] ):
return model.decode(
decoder_input_ids=_lowerCamelCase , decoder_attention_mask=_lowerCamelCase , encoder_outputs=_lowerCamelCase , )
with self.subTest("JIT Enabled" ):
_UpperCAmelCase : Tuple = decode_jitted(**_lowerCamelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_UpperCAmelCase : List[str] = decode_jitted(**_lowerCamelCase ).to_tuple()
self.assertEqual(len(_lowerCamelCase ) , len(_lowerCamelCase ) )
for jitted_output, output in zip(_lowerCamelCase , _lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def a__ ( self : Tuple ):
for model_class_name in self.all_model_classes:
_UpperCAmelCase : Any = model_class_name.from_pretrained("facebook/blenderbot-400M-distill" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_UpperCAmelCase : Dict = np.ones((1, 1) ) * model.config.eos_token_id
_UpperCAmelCase : List[str] = model(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
@unittest.skipUnless(jax_device != "cpu" , "3B test too slow on CPU." )
@slow
def a__ ( self : Any ):
_UpperCAmelCase : Tuple = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
_UpperCAmelCase : List[str] = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
_UpperCAmelCase : Any = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B" , from_pt=_lowerCamelCase )
_UpperCAmelCase : Union[str, Any] = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B" )
_UpperCAmelCase : List[str] = ["Sam"]
_UpperCAmelCase : Tuple = tokenizer(_lowerCamelCase , return_tensors="jax" )
_UpperCAmelCase : Union[str, Any] = model.generate(**_lowerCamelCase , **_lowerCamelCase )
_UpperCAmelCase : List[Any] = "Sam is a great name. It means \"sun\" in Gaelic."
_UpperCAmelCase : Union[str, Any] = tokenizer.batch_decode(_lowerCamelCase , **_lowerCamelCase )
assert generated_txt[0].strip() == tgt_text
| 328
| 0
|
'''simple docstring'''
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print( name , val , spaces=0 ):
'''simple docstring'''
if name is None:
msg = None
else:
fmt = "." * max(0 ,spaces - 2 ) + "# {:" + str(50 - spaces ) + "s}"
msg = fmt.format(name )
# Print and recurse (if needed).
if isinstance(val ,dict ):
if msg is not None:
print(msg )
for k in val.keys():
recursive_print(k ,val[k] ,spaces + 2 )
elif isinstance(val ,torch.Tensor ):
print(msg ,":" ,val.size() )
else:
print(msg ,":" ,val )
def fix_query_key_value_ordering( param , checkpoint_version , num_splits , num_heads , hidden_size ):
'''simple docstring'''
input_shape = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
param = param.view(*saved_shape )
param = param.transpose(0 ,2 )
param = param.transpose(1 ,2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
param = param.view(*saved_shape )
param = param.transpose(0 ,1 ).contiguous()
param = param.view(*input_shape )
return param
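# Hedged illustration (shapes only, no real checkpoint needed): a
# checkpoint_version >= 2.0 QKV weight of shape (num_heads * 3 * head_dim, D)
# is viewed as (num_heads, 3, head_dim, D), its first two axes are swapped to
# (3, num_heads, head_dim, D), and it is flattened back, so the Q, K and V
# slices become contiguous the way transformers' GPT-2 attention expects:
#
#     import torch
#     w = torch.randn(2 * 3 * 4, 8)  # num_heads=2, num_splits=3, head_dim=4
#     out = fix_query_key_value_ordering(w, 2.0, 3, 2, 4)
#     assert out.shape == w.shape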
def convert_megatron_checkpoint( args , input_state_dict , config ):
'''simple docstring'''
output_state_dict = {}
# old versions did not store training args
ds_args = input_state_dict.get("args" ,None )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
lowercase__ = ds_args.padded_vocab_size
lowercase__ = ds_args.max_position_embeddings
lowercase__ = ds_args.hidden_size
lowercase__ = ds_args.num_layers
lowercase__ = ds_args.num_attention_heads
lowercase__ = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
lowercase__ = config.n_head
# The hidden_size per head.
lowercase__ = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
lowercase__ = input_state_dict["checkpoint_version"]
else:
lowercase__ = 0.0
# The model.
lowercase__ = input_state_dict["model"]
# The language model.
lowercase__ = model["language_model"]
# The embeddings.
lowercase__ = lm["embedding"]
# The word embeddings.
lowercase__ = embeddings["word_embeddings"]["weight"]
# Truncate the embedding table to vocab_size rows.
lowercase__ = word_embeddings[: config.vocab_size, :]
lowercase__ = word_embeddings
# The position embeddings.
lowercase__ = embeddings["position_embeddings"]["weight"]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
lowercase__ = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
lowercase__ = pos_embeddings
# The transformer.
lowercase__ = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
# The regex to extract layer names.
lowercase__ = re.compile(R"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)" )
# The simple map of names for "automated" rules.
lowercase__ = {
"attention.dense": ".attn.c_proj.",
"self_attention.dense": ".attn.c_proj.",
"mlp.dense_h_to_4h": ".mlp.c_fc.",
"mlp.dense_4h_to_h": ".mlp.c_proj.",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
lowercase__ = layer_re.match(_snake_case )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
lowercase__ = int(m.group(1 ) )
# The name of the operation.
lowercase__ = m.group(2 )
# Is it a weight or a bias?
lowercase__ = m.group(3 )
# The name of the layer.
lowercase__ = f'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith("layernorm" ):
lowercase__ = "ln_1" if op_name.startswith("input" ) else "ln_2"
lowercase__ = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
lowercase__ = torch.tril(torch.ones((n_positions, n_positions) ,dtype=torch.floataa ) ).view(
1 ,1 ,_snake_case ,_snake_case )
lowercase__ = causal_mask
# Insert a "dummy" tensor for masked_bias.
lowercase__ = torch.tensor(-1e4 ,dtype=torch.floataa )
lowercase__ = masked_bias
lowercase__ = fix_query_key_value_ordering(_snake_case ,_snake_case ,3 ,_snake_case ,_snake_case )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
lowercase__ = out_val.transpose(0 ,1 ).contiguous()
# Store.
lowercase__ = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
lowercase__ = fix_query_key_value_ordering(_snake_case ,_snake_case ,3 ,_snake_case ,_snake_case )
# Store. No change of shape.
lowercase__ = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
lowercase__ = megatron_to_transformers[op_name]
lowercase__ = val.transpose(0 ,1 )
# Copy the bias.
elif weight_or_bias == "bias":
lowercase__ = megatron_to_transformers[op_name]
lowercase__ = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
lowercase__ = transformer["final_layernorm.weight"]
lowercase__ = transformer["final_layernorm.bias"]
# For the LM head, transformers wants the matrix tied to the word embeddings.
lowercase__ = word_embeddings
# It should be done!
return output_state_dict
def main():
'''simple docstring'''
parser = argparse.ArgumentParser()
parser.add_argument("--print-checkpoint-structure" ,action="store_true" )
parser.add_argument(
"path_to_checkpoint" ,type=_snake_case ,help="Path to the checkpoint file (.zip archive or direct .pt file)" ,)
parser.add_argument(
"--config_file" ,default="" ,type=_snake_case ,help="An optional config json file describing the pre-trained model." ,)
args = parser.parse_args()
# Extract the basename.
basename = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith(".zip" ):
with zipfile.ZipFile(args.path_to_checkpoint ,"r" ) as checkpoint:
with checkpoint.open("release/mp_rank_00/model_optim_rng.pt" ) as pytorch_dict:
input_state_dict = torch.load(pytorch_dict ,map_location="cpu" )
else:
input_state_dict = torch.load(args.path_to_checkpoint ,map_location="cpu" )
ds_args = input_state_dict.get("args" ,None )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
activation_function = "gelu_fast"
elif ds_args.openai_gelu:
activation_function = "gelu_new"
else:
activation_function = "gelu"
else:
# in the very early days this used to be "gelu_new"
activation_function = "gelu_new"
# Spell out all parameters in case the defaults change.
config = GPTaConfig(
vocab_size=50_257 ,n_positions=1_024 ,n_embd=1_024 ,n_layer=24 ,n_head=16 ,n_inner=4_096 ,activation_function=activation_function ,resid_pdrop=0.1 ,embd_pdrop=0.1 ,attn_pdrop=0.1 ,layer_norm_epsilon=1e-5 ,initializer_range=0.02 ,summary_type="cls_index" ,summary_use_proj=True ,summary_activation=None ,summary_proj_to_labels=True ,summary_first_dropout=0.1 ,scale_attn_weights=True ,use_cache=True ,bos_token_id=50_256 ,eos_token_id=50_256 ,)
else:
config = GPTaConfig.from_json_file(args.config_file )
config.architectures = ["GPT2LMHeadModel"]
# Convert.
print("Converting" )
output_state_dict = convert_megatron_checkpoint(args ,input_state_dict ,config )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(None ,output_state_dict )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
tokenizer_type = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
tokenizer_model_name = "gpt2"
elif tokenizer_type == "PretrainedFromHF":
tokenizer_model_name = ds_args.tokenizer_name_or_path
else:
raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
lowercase__ = "gpt2"
lowercase__ = AutoTokenizer.from_pretrained(_snake_case )
lowercase__ = type(_snake_case ).__name__
lowercase__ = tokenizer_class
# Store the config to file.
print("Saving config" )
config.save_pretrained(basename )
# Save tokenizer based on args
print(f'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(basename )
# Store the state_dict to file.
output_checkpoint_file = os.path.join(basename ,"pytorch_model.bin" )
print(f'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(output_state_dict ,output_checkpoint_file )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 267
|
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
SCREAMING_SNAKE_CASE__ = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
SCREAMING_SNAKE_CASE__ = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
SCREAMING_SNAKE_CASE__ = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case (datasets.Metric ):
def _a ( self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(self._get_feature_types() ) ,reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
] ,)
def _a ( self ) -> Tuple:
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float" ) ),
"references": datasets.Sequence(datasets.Value("float" ) ),
}
else:
return {
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
}
def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_=None ,UpperCAmelCase_="uniform_average" ,UpperCAmelCase_=True ) -> Tuple:
lowercase__ = mean_squared_error(
UpperCAmelCase_ ,UpperCAmelCase_ ,sample_weight=UpperCAmelCase_ ,multioutput=UpperCAmelCase_ ,squared=UpperCAmelCase_ )
return {"mse": mse}
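# A minimal usage sketch of the metric above, mirroring the doctest in
# _KWARGS_DESCRIPTION. `datasets.load_metric` is the legacy loader; newer
# releases moved metrics to the separate `evaluate` package.
import datasets

mse_metric = datasets.load_metric("mse")
results = mse_metric.compute(predictions=[2.5, 0.0, 2, 8], references=[3, -0.5, 2, 7])
print(results)  # {'mse': 0.375}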
| 267
| 1
|
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency( inductance , capacitance ) -> tuple:
    """Compute the resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C))."""
    if inductance <= 0:
        raise ValueError('Inductance cannot be 0 or negative' )
    elif capacitance <= 0:
        raise ValueError('Capacitance cannot be 0 or negative' )
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
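if __name__ == "__main__":
    # A worked example with assumed component values L = 10 mH and C = 100 nF:
    # f = 1 / (2 * pi * sqrt(L * C)) is roughly 5032.9 Hz.
    label, frequency = resonant_frequency(inductance=10e-3, capacitance=100e-9)
    print(f"{label}: {frequency:.1f} Hz")  # Resonant frequency: 5032.9 Hz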
| 335
|
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __a ( __UpperCamelCase , unittest.TestCase ):
__lowercase : int = DDIMPipeline
__lowercase : Dict = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
__lowercase : List[str] = PipelineTesterMixin.required_optional_params - {
'num_images_per_prompt',
'latents',
'callback',
'callback_steps',
}
__lowercase : Optional[int] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
__lowercase : Any = False
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowercase__: Dict = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
lowercase__: int = DDIMScheduler()
lowercase__: List[Any] = {'unet': unet, 'scheduler': scheduler}
return components
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__=0 ) -> List[str]:
'''simple docstring'''
if str(lowerCAmelCase__ ).startswith('mps' ):
lowercase__: Any = torch.manual_seed(lowerCAmelCase__ )
else:
lowercase__: Any = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
lowercase__: Any = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__: int = 'cpu'
lowercase__: List[str] = self.get_dummy_components()
lowercase__: Union[str, Any] = self.pipeline_class(**lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
lowercase__: List[str] = self.get_dummy_inputs(lowerCAmelCase__ )
lowercase__: str = pipe(**lowerCAmelCase__ ).images
lowercase__: List[str] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
lowercase__: Optional[Any] = np.array(
[1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] )
lowercase__: int = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase__ , 1E-3 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
super().test_save_load_local(expected_max_difference=3E-3 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __a ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
lowercase__: Tuple = 'google/ddpm-cifar10-32'
lowercase__: Union[str, Any] = UNetaDModel.from_pretrained(lowerCAmelCase__ )
lowercase__: Optional[Any] = DDIMScheduler()
lowercase__: List[str] = DDIMPipeline(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
ddim.to(lowerCAmelCase__ )
ddim.set_progress_bar_config(disable=lowerCAmelCase__ )
lowercase__: Optional[Any] = torch.manual_seed(0 )
lowercase__: str = ddim(generator=lowerCAmelCase__ , eta=0.0 , output_type='numpy' ).images
lowercase__: Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase__: Optional[Any] = np.array([0.1_7_2_3, 0.1_6_1_7, 0.1_6_0_0, 0.1_6_2_6, 0.1_4_9_7, 0.1_5_1_3, 0.1_5_0_5, 0.1_4_4_2, 0.1_4_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase__: Tuple = 'google/ddpm-ema-bedroom-256'
lowercase__: int = UNetaDModel.from_pretrained(lowerCAmelCase__ )
lowercase__: Tuple = DDIMScheduler.from_pretrained(lowerCAmelCase__ )
lowercase__: Any = DDIMPipeline(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
ddpm.to(lowerCAmelCase__ )
ddpm.set_progress_bar_config(disable=lowerCAmelCase__ )
lowercase__: Optional[int] = torch.manual_seed(0 )
lowercase__: Tuple = ddpm(generator=lowerCAmelCase__ , output_type='numpy' ).images
lowercase__: str = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowercase__: List[str] = np.array([0.0_0_6_0, 0.0_2_0_1, 0.0_3_4_4, 0.0_0_2_4, 0.0_0_1_8, 0.0_0_0_2, 0.0_0_2_2, 0.0_0_0_0, 0.0_0_6_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
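# A minimal stand-alone sketch of the integration test above, using the public
# "google/ddpm-cifar10-32" weights; assumes diffusers is installed and a CUDA
# device is available (UNet2DModel / DDIMPipeline are the canonical names).
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler()).to("cuda")
image = pipe(generator=torch.manual_seed(0), eta=0.0, output_type="pil").images[0]
image.save("ddim_cifar10_sample.png")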
| 335
| 1
|
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ (metaclass=DummyObject ):
    _backends = ["""flax""", """transformers"""]
    def __init__( self , *args , **kwargs):
        requires_backends(self , ['flax', 'transformers'])
    @classmethod
    def from_config( cls , *args , **kwargs):
        requires_backends(cls , ['flax', 'transformers'])
    @classmethod
    def from_pretrained( cls , *args , **kwargs):
        requires_backends(cls , ['flax', 'transformers'])
class SCREAMING_SNAKE_CASE__ (metaclass=DummyObject ):
    _backends = ["""flax""", """transformers"""]
    def __init__( self , *args , **kwargs):
        requires_backends(self , ['flax', 'transformers'])
    @classmethod
    def from_config( cls , *args , **kwargs):
        requires_backends(cls , ['flax', 'transformers'])
    @classmethod
    def from_pretrained( cls , *args , **kwargs):
        requires_backends(cls , ['flax', 'transformers'])
class SCREAMING_SNAKE_CASE__ (metaclass=DummyObject ):
    _backends = ["""flax""", """transformers"""]
    def __init__( self , *args , **kwargs):
        requires_backends(self , ['flax', 'transformers'])
    @classmethod
    def from_config( cls , *args , **kwargs):
        requires_backends(cls , ['flax', 'transformers'])
    @classmethod
    def from_pretrained( cls , *args , **kwargs):
        requires_backends(cls , ['flax', 'transformers'])
class SCREAMING_SNAKE_CASE__ (metaclass=DummyObject ):
    _backends = ["""flax""", """transformers"""]
    def __init__( self , *args , **kwargs):
        requires_backends(self , ['flax', 'transformers'])
    @classmethod
    def from_config( cls , *args , **kwargs):
        requires_backends(cls , ['flax', 'transformers'])
    @classmethod
    def from_pretrained( cls , *args , **kwargs):
        requires_backends(cls , ['flax', 'transformers'])
| 164
|
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SCREAMING_SNAKE_CASE__ (__snake_case , __snake_case , __snake_case ):
__lowerCamelCase : Optional[int] = [r"""h\.\d+\.attn\.bias""", r"""h\.\d+\.attn\.masked_bias"""]
@register_to_config
def __init__( self , a , a , a = None , a = 5_0257 , a = 1024 , a = 768 , a = 12 , a = 12 , a = None , a = "gelu_new" , a = 0.1 , a = 0.1 , a = 0.1 , a = 1e-5 , a = 0.02 , a = True , a = True , a = False , a = False , ):
super().__init__()
lowercase__ : List[str] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
f""" `n_embd`: {n_embd} are not equal.""")
lowercase__ : Any = prefix_inner_dim
lowercase__ : List[str] = prefix_hidden_dim
lowercase__ : Tuple = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim)
if self.prefix_hidden_dim is not None
else nn.Identity()
)
lowercase__ : Dict = (
nn.Linear(self.prefix_hidden_dim , a) if self.prefix_hidden_dim is not None else nn.Identity()
)
lowercase__ : Tuple = GPTaConfig(
vocab_size=a , n_positions=a , n_embd=a , n_layer=a , n_head=a , n_inner=a , activation_function=a , resid_pdrop=a , embd_pdrop=a , attn_pdrop=a , layer_norm_epsilon=a , initializer_range=a , scale_attn_weights=a , use_cache=a , scale_attn_by_inverse_layer_idx=a , reorder_and_upcast_attn=a , )
lowercase__ : Tuple = GPTaLMHeadModel(a)
def snake_case_ ( self , a , a , a = None , a = None , ):
lowercase__ : Optional[Any] = self.transformer.transformer.wte(a)
lowercase__ : Optional[Any] = self.encode_prefix(a)
lowercase__ : Union[str, Any] = self.decode_prefix(a)
lowercase__ : List[Any] = torch.cat((prefix_embeds, embedding_text) , dim=1)
if labels is not None:
lowercase__ : Optional[Any] = self.get_dummy_token(input_ids.shape[0] , input_ids.device)
lowercase__ : Optional[int] = torch.cat((dummy_token, input_ids) , dim=1)
lowercase__ : Optional[Any] = self.transformer(inputs_embeds=a , labels=a , attention_mask=a)
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def snake_case_ ( self , a , a):
return torch.zeros(a , self.prefix_length , dtype=torch.intaa , device=a)
def snake_case_ ( self , a):
return self.encode_prefix(a)
@torch.no_grad()
def snake_case_ ( self , a , a , a):
lowercase__ : List[str] = torch.split(a , 1 , dim=0)
lowercase__ : Optional[Any] = []
lowercase__ : str = []
for feature in features:
lowercase__ : Dict = self.decode_prefix(feature.to(a)) # back to the clip feature
# Only support beam search for now
lowercase__ , lowercase__ : str = self.generate_beam(
input_embeds=a , device=a , eos_token_id=a)
generated_tokens.append(output_tokens[0])
generated_seq_lengths.append(seq_lengths[0])
lowercase__ : str = torch.stack(a)
lowercase__ : List[str] = torch.stack(a)
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def snake_case_ ( self , a=None , a=None , a=None , a = 5 , a = 67 , a = 1.0 , a = None , ):
lowercase__ : Optional[int] = eos_token_id
lowercase__ : List[Any] = None
lowercase__ : int = None
lowercase__ : str = torch.ones(a , device=a , dtype=torch.int)
lowercase__ : List[Any] = torch.zeros(a , device=a , dtype=torch.bool)
if input_embeds is not None:
lowercase__ : int = input_embeds
else:
lowercase__ : int = self.transformer.transformer.wte(a)
for i in range(a):
lowercase__ : Union[str, Any] = self.transformer(inputs_embeds=a)
lowercase__ : Optional[int] = outputs.logits
lowercase__ : Union[str, Any] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
lowercase__ : Union[str, Any] = logits.softmax(-1).log()
if scores is None:
lowercase__ , lowercase__ : Tuple = logits.topk(a , -1)
lowercase__ : Dict = generated.expand(a , *generated.shape[1:])
lowercase__ , lowercase__ : Dict = next_tokens.permute(1 , 0), scores.squeeze(0)
if tokens is None:
lowercase__ : Union[str, Any] = next_tokens
else:
lowercase__ : Dict = tokens.expand(a , *tokens.shape[1:])
lowercase__ : Dict = torch.cat((tokens, next_tokens) , dim=1)
else:
lowercase__ : str = -float(np.inf)
lowercase__ : Optional[Any] = 0
lowercase__ : Dict = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
lowercase__ : str = scores_sum / seq_lengths[:, None]
lowercase__ , lowercase__ : List[str] = scores_sum_average.view(-1).topk(a , -1)
lowercase__ : List[str] = next_tokens // scores_sum.shape[1]
lowercase__ : List[Any] = seq_lengths[next_tokens_source]
lowercase__ : Dict = next_tokens % scores_sum.shape[1]
lowercase__ : Tuple = next_tokens.unsqueeze(1)
lowercase__ : Union[str, Any] = tokens[next_tokens_source]
lowercase__ : Any = torch.cat((tokens, next_tokens) , dim=1)
lowercase__ : List[str] = generated[next_tokens_source]
lowercase__ : Union[str, Any] = scores_sum_average * seq_lengths
lowercase__ : str = is_stopped[next_tokens_source]
lowercase__ : List[Any] = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0] , 1 , -1)
lowercase__ : Optional[Any] = torch.cat((generated, next_token_embed) , dim=1)
lowercase__ : Optional[Any] = is_stopped + next_tokens.eq(a).squeeze()
if is_stopped.all():
break
lowercase__ : Dict = scores / seq_lengths
lowercase__ : Optional[Any] = scores.argsort(descending=a)
# tokens tensors are already padded to max_seq_length
lowercase__ : int = [tokens[i] for i in order]
lowercase__ : Optional[int] = torch.stack(a , dim=0)
lowercase__ : Optional[Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype)
return output_texts, seq_lengths
| 164
| 1
|
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
UpperCamelCase__ : Optional[List[str]] = None
UpperCamelCase__ : Optional[Any] = """<""" if sys.byteorder == """little""" else """>"""
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
UpperCamelCase__ : Dict = [
np.dtype("""|b1"""),
np.dtype("""|u1"""),
np.dtype("""<u2"""),
np.dtype(""">u2"""),
np.dtype("""<i2"""),
np.dtype(""">i2"""),
np.dtype("""<u4"""),
np.dtype(""">u4"""),
np.dtype("""<i4"""),
np.dtype(""">i4"""),
np.dtype("""<f4"""),
np.dtype(""">f4"""),
np.dtype("""<f8"""),
np.dtype(""">f8"""),
]
@dataclass
class _UpperCamelCase :
'''simple docstring'''
lowerCamelCase : bool = True
lowerCamelCase : Optional[str] = None
# Automatically constructed
lowerCamelCase : ClassVar[str] = "PIL.Image.Image"
lowerCamelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
lowerCamelCase : str = field(default='Image' , init=A_ , repr=A_ )
def __call__( self : List[Any] ):
'''simple docstring'''
return self.pa_type
def SCREAMING_SNAKE_CASE ( self : Dict , __lowercase : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if isinstance(__lowercase , __lowercase ):
UpperCAmelCase_ = np.array(__lowercase )
if isinstance(__lowercase , __lowercase ):
return {"path": value, "bytes": None}
elif isinstance(__lowercase , __lowercase ):
return {"path": None, "bytes": value}
elif isinstance(__lowercase , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(__lowercase )
elif isinstance(__lowercase , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(__lowercase )
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
F"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
    def SCREAMING_SNAKE_CASE ( self : List[str] , value : dict , token_per_repo_id : Dict=None ):
        '''Decode an encoded {"bytes", "path"} dict back into a PIL image.'''
        if not self.decode:
            raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("""To support decoding images, please install 'Pillow'.""" )
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value["""path"""], value["""bytes"""]
        if bytes_ is None:
            if path is None:
                raise ValueError(F"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" )
            else:
                if is_local_path(path ):
                    image = PIL.Image.open(path )
                else:
                    source_url = path.split("""::""" )[-1]
                    try:
                        repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL )["""repo_id"""]
                        use_auth_token = token_per_repo_id.get(repo_id )
                    except ValueError:
                        use_auth_token = None
                    with xopen(path , """rb""" , use_auth_token=use_auth_token ) as f:
                        bytes_ = BytesIO(f.read() )
                    image = PIL.Image.open(bytes_ )
        else:
            image = PIL.Image.open(BytesIO(bytes_ ) )
        image.load()  # to avoid "Too many open files" errors
        return image
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
'''simple docstring'''
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
def SCREAMING_SNAKE_CASE ( self : Dict , __lowercase : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
'''simple docstring'''
if pa.types.is_string(storage.type ):
UpperCAmelCase_ = pa.array([None] * len(__lowercase ) , type=pa.binary() )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCAmelCase_ = pa.array([None] * len(__lowercase ) , type=pa.string() )
UpperCAmelCase_ = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
UpperCAmelCase_ = storage.field("""bytes""" )
else:
UpperCAmelCase_ = pa.array([None] * len(__lowercase ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
UpperCAmelCase_ = storage.field("""path""" )
else:
UpperCAmelCase_ = pa.array([None] * len(__lowercase ) , type=pa.string() )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCAmelCase_ = pa.array(
[encode_np_array(np.array(__lowercase ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
UpperCAmelCase_ = pa.array([None] * len(__lowercase ) , type=pa.string() )
UpperCAmelCase_ = pa.StructArray.from_arrays(
[bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(__lowercase , self.pa_type )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowercase : pa.StructArray ):
'''simple docstring'''
@no_op_if_value_is_null
def path_to_bytes(__lowercase : Dict ):
with xopen(__lowercase , """rb""" ) as f:
UpperCAmelCase_ = f.read()
return bytes_
UpperCAmelCase_ = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCAmelCase_ = pa.array(
[os.path.basename(__lowercase ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(__lowercase , self.pa_type )
def list_image_compression_formats() -> list:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("""To support encoding images, please install 'Pillow'.""" )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image" ) -> bytes:
    # use the image's native compression format when PIL can both open and save it
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
    image.save(buffer , format=format )
    return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image" ) -> dict:
    if hasattr(image , """filename""" ) and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image )}
def encode_np_array(array: np.ndarray ) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("""To support encoding images, please install 'Pillow'.""" )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("""|u1""" )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" )
        if dtype is not dest_dtype:
            warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize )
            dest_dtype = np.dtype(dest_dtype_str )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" )
    image = PIL.Image.fromarray(array.astype(dest_dtype ) )
    return {"path": None, "bytes": image_to_bytes(image )}
def objects_to_list_of_image_dicts(objs: list ) -> list:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("""To support encoding images, please install 'Pillow'.""" )
    if objs:
        _, obj = first_non_null_value(objs )
        if isinstance(obj , str ):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj , np.ndarray ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        elif isinstance(obj , PIL.Image.Image ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        else:
            return objs
    else:
        return objs
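# A minimal sketch of the encode/decode round trip the feature above implements,
# via the public `datasets` API; the file path is a hypothetical placeholder.
from datasets import Dataset, Image

ds = Dataset.from_dict({"image": ["path/to/image.png"]})
ds = ds.cast_column("image", Image())  # encode_example stores {"bytes", "path"}
pil_image = ds[0]["image"]             # decode_example returns a PIL.Image.Image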
| 486
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ : List[Any] = {"""configuration_sew""": ["""SEW_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SEWConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : List[str] = [
"""SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SEWForCTC""",
"""SEWForSequenceClassification""",
"""SEWModel""",
"""SEWPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 486
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'vocab_file': 'sentencepiece.model'}
UpperCamelCase = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
UpperCamelCase = {
'google/rembert': 256,
}
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : Any="[CLS]" , SCREAMING_SNAKE_CASE__ : Optional[int]="[SEP]" , SCREAMING_SNAKE_CASE__ : Dict="[UNK]" , SCREAMING_SNAKE_CASE__ : Optional[Any]="[SEP]" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="[PAD]" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="[CLS]" , SCREAMING_SNAKE_CASE__ : List[Any]="[MASK]" , **SCREAMING_SNAKE_CASE__ : str , ) -> Dict:
super().__init__(
do_lower_case=SCREAMING_SNAKE_CASE__ , remove_space=SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = do_lower_case
lowerCAmelCase__ = remove_space
lowerCAmelCase__ = keep_accents
lowerCAmelCase__ = vocab_file
lowerCAmelCase__ = spm.SentencePieceProcessor()
self.sp_model.Load(SCREAMING_SNAKE_CASE__ )
@property
def a ( self : int ) -> Union[str, Any]:
return len(self.sp_model )
def a ( self : Any ) -> str:
lowerCAmelCase__ = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ) -> List[str]:
lowerCAmelCase__ = self.__dict__.copy()
lowerCAmelCase__ = None
return state
def __setstate__( self : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[int]:
lowerCAmelCase__ = d
lowerCAmelCase__ = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def a ( self : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int=False ) -> Optional[int]:
lowerCAmelCase__ = self.sp_model.EncodeAsPieces(SCREAMING_SNAKE_CASE__ )
return pieces
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[Any]:
return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ )
def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict:
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ )
def a ( self : Any , SCREAMING_SNAKE_CASE__ : int ) -> int:
lowerCAmelCase__ = self.sp_model.decode_pieces(SCREAMING_SNAKE_CASE__ )
return out_string
def a ( self : int , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]:
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
def a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]:
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error("Vocabulary path ({}) should be a directory".format(SCREAMING_SNAKE_CASE__ ) )
return
lowerCAmelCase__ = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
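# A minimal usage sketch: the class above corresponds to transformers'
# RemBertTokenizer, so its special-token helpers can be exercised like this,
# assuming the "google/rembert" checkpoint from PRETRAINED_VOCAB_FILES_MAP.
from transformers import RemBertTokenizer

tokenizer = RemBertTokenizer.from_pretrained("google/rembert")
ids = tokenizer.encode("Hello world")  # wraps the tokens in [CLS] ... [SEP]
mask = tokenizer.get_special_tokens_mask(ids, already_has_special_tokens=True)
print(mask)  # 1 marks the [CLS]/[SEP] positions, 0 marks regular tokens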
| 61
|
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class A ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ (self : List[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = """ylacombe/bark-small"""
lowercase__ = tempfile.mkdtemp()
lowercase__ = """en_speaker_1"""
lowercase__ = """This is a test string"""
lowercase__ = """speaker_embeddings_path.json"""
lowercase__ = """speaker_embeddings"""
def lowerCamelCase__ (self : str , **_UpperCAmelCase : Optional[int] ) -> str:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **_UpperCAmelCase )
def lowerCamelCase__ (self : str ) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ (self : Optional[int] ) -> List[str]:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
lowercase__ = BarkProcessor(tokenizer=_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowerCamelCase__ (self : str ) -> Tuple:
"""simple docstring"""
lowercase__ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowercase__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowercase__ = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowerCamelCase__ (self : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase__ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowercase__ = 35
lowercase__ = 2
lowercase__ = 8
lowercase__ = {
"""semantic_prompt""": np.ones(_UpperCAmelCase ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowercase__ = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
lowercase__ = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowercase__ = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(_UpperCAmelCase , **_UpperCAmelCase )
lowercase__ = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
lowercase__ = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowercase__ = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowerCamelCase__ (self : int ) -> Tuple:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
lowercase__ = BarkProcessor(tokenizer=_UpperCAmelCase )
lowercase__ = processor(text=self.input_string )
lowercase__ = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 15
| 0
|
"""simple docstring"""
from manim import *
class __a (UpperCamelCase_):
'''simple docstring'''
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE__ : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE__ : List[str] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ : Optional[Any] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ : List[str] = VGroup(*_a ).arrange(_a , buff=0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = VGroup(*_a ).arrange(_a , buff=0 )
SCREAMING_SNAKE_CASE__ : Tuple = VGroup(_a , _a ).arrange(_a , buff=0 )
SCREAMING_SNAKE_CASE__ : int = Text("""CPU""" , font_size=24 )
SCREAMING_SNAKE_CASE__ : List[str] = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_a )
SCREAMING_SNAKE_CASE__ : List[Any] = [mem.copy() for i in range(1 )]
SCREAMING_SNAKE_CASE__ : str = VGroup(*_a ).arrange(_a , buff=0 )
SCREAMING_SNAKE_CASE__ : List[str] = Text("""GPU""" , font_size=24 )
SCREAMING_SNAKE_CASE__ : List[Any] = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
gpu.align_to(_a , _a )
gpu.set_x(gpu.get_x() - 1 )
self.add(_a )
SCREAMING_SNAKE_CASE__ : List[str] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ : Optional[int] = VGroup(*_a ).arrange(_a , buff=0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Text("""Model""" , font_size=24 )
SCREAMING_SNAKE_CASE__ : int = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
model.move_to([3, -1.0, 0] )
self.play(
Create(_a , run_time=1 ) , Create(_a , run_time=1 ) , Create(_a , run_time=1 ) , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = MarkupText(
f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
SCREAMING_SNAKE_CASE__ : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE__ : int = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_a , run_time=2.5 ) , Write(_a ) , Write(_a ) )
self.add(_a )
SCREAMING_SNAKE_CASE__ : List[str] = []
SCREAMING_SNAKE_CASE__ : str = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for i, rect in enumerate(_a ):
SCREAMING_SNAKE_CASE__ : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_a , opacity=0.7 )
cpu_target.move_to(_a )
cpu_target.generate_target()
SCREAMING_SNAKE_CASE__ : Optional[int] = 0.46 / 4
SCREAMING_SNAKE_CASE__ : List[Any] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_a )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=_a , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=_a , buff=0.0 )
cpu_targs.append(_a )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_a ) )
second_animations.append(MoveToTarget(_a , run_time=1.5 ) )
self.play(*_a )
self.play(*_a )
self.wait()
| 12
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __a (unittest.TestCase):
'''simple docstring'''
@slow
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
SCREAMING_SNAKE_CASE__ : Any = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_a )["""last_hidden_state"""]
SCREAMING_SNAKE_CASE__ : List[str] = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , _a )
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.convert_to_tensor(
[[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 12
| 1
|
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url( repo_id: str , path: str , revision: Optional[str] = None ) -> str:
    if version.parse(hfh.__version__ ).release < version.parse("""0.11.0""" ).release:
        # old versions of hfh don't url-encode the file path
        path = quote(path )
    return hfh.hf_hub_url(repo_id , path , repo_type="""dataset""" , revision=revision )
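# A short usage sketch: building the resolved URL for a file inside a dataset
# repository on the Hub; the repo and filename are hypothetical placeholders.
url = hf_hub_url("user/my_dataset", "data/train.csv")
print(url)  # e.g. https://huggingface.co/datasets/user/my_dataset/resolve/main/data/train.csv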
| 578
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCamelCase_ ( a_ ):
_A : Optional[int] = 'facebook/bart-large-mnli'
_A : Union[str, Any] = (
'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
'It returns the most likely label in the list of provided `labels` for the input text.'
)
_A : Dict = 'text_classifier'
_A : Union[str, Any] = AutoTokenizer
_A : Tuple = AutoModelForSequenceClassification
_A : Optional[int] = ['text', ['text']]
_A : Dict = ['text']
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
super().setup()
UpperCAmelCase = self.model.config
UpperCAmelCase = -1
for idx, label in config.idalabel.items():
if label.lower().startswith("""entail""" ):
UpperCAmelCase = int(snake_case__ )
if self.entailment_id == -1:
raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""" )
def UpperCamelCase_ ( self , snake_case__ , snake_case__ ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = labels
return self.pre_processor(
[text] * len(snake_case__ ) , [f'''This example is {label}''' for label in labels] , return_tensors="""pt""" , padding="""max_length""" , )
def UpperCamelCase_ ( self , snake_case__ ) -> str:
"""simple docstring"""
UpperCAmelCase = outputs.logits
UpperCAmelCase = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
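# A hedged usage sketch, assuming `TextClassificationTool` is the unmangled name
# of the class above (transformers exposes it through the agents/tools API);
# __call__ runs setup lazily on first use. Exact loading entry points may differ
# across transformers versions.
tool = TextClassificationTool()
print(tool("This is a super nice API!", labels=["positive", "negative"]))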
| 673
| 0
|
'''simple docstring'''
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float] , xa: float , xb: float ) -> float:
    """Find a root of `function` with the secant method, starting from guesses xa and xb."""
    x_n = xa
    x_n1 = xb
    while True:
        if x_n == x_n1 or function(x_n1 ) == function(x_n ):
            raise ZeroDivisionError("""float division by zero, could not find root""" )
        # secant update: subtract f(x_n1) divided by the slope of the secant line
        x_n2 = x_n1 - (
            function(x_n1 ) / ((function(x_n1 ) - function(x_n )) / (x_n1 - x_n ))
        )
        if abs(x_n2 - x_n1 ) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f(x: float ) -> float:
    return math.pow(x , 3 ) - (2 * x) - 5
if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
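if __name__ == "__main__":
    # A quick sanity check: the real root of x**3 - 2x - 5 is about 2.0945515,
    # so the returned approximation should make f nearly vanish.
    root = intersection(f, 3, 3.5)
    assert abs(f(root)) < 1e-3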
| 706
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE )
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__a : str = field(default="audio-classification" ,metadata={"include_in_asdict_even_if_is_default": True} )
__a : ClassVar[Features] = Features({"audio": Audio()} )
__a : ClassVar[Features] = Features({"labels": ClassLabel} )
__a : str = "audio"
__a : str = "labels"
def A ( self : List[Any] , lowercase : List[Any] ) -> Any:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(f"Column {self.label_column} is not present in features." )
if not isinstance(features[self.label_column] , lowercase ):
raise ValueError(f"Column {self.label_column} is not a ClassLabel." )
UpperCamelCase__ = copy.deepcopy(self )
UpperCamelCase__ = self.label_schema.copy()
UpperCamelCase__ = features[self.label_column]
UpperCamelCase__ = label_schema
return task_template
@property
def A ( self : int ) -> Dict[str, str]:
'''simple docstring'''
return {
self.audio_column: "audio",
self.label_column: "labels",
}
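# A minimal sketch: the class above is datasets' AudioClassification task
# template; aligning it with a concrete schema copies the real ClassLabel into
# label_schema (the label names here are illustrative).
from datasets import Audio, ClassLabel, Features
from datasets.tasks import AudioClassification

features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
template = AudioClassification().align_with_features(features)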
| 265
| 0
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Tuple = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)
    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # record only leaf modules (or convs / batch norms, which own the weights)
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)
    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self
    @property
    def parametrized(self):
        # keep only modules that actually hold learnable parameters
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    def __call__(self, x: Tensor):
        # trace both modules with the same input and copy weights operation by operation
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"""Numbers of operations are different. Source module has {len(src_traced)} operations while"""
                f""" destination module has {len(dest_traced)}.""" )
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"""Transferred from={src_m} to={dest_m}""")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"""Converting {name}...""")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 2_24, 2_24))
        module_transfer(x)
    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."
    checkpoint_name = f"""resnet{"-".join(name.split("resnet"))}"""
    print(checkpoint_name)
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add model', use_temp_dir=True, )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k')
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add image processor', use_temp_dir=True, )
        print(f"""Pushed {checkpoint_name}""")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 10_00
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 1_28, 2_56, 5_12], layer_type='basic'),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[2_56, 5_12, 10_24, 20_48], layer_type='bottleneck'),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 1_28, 2_56, 5_12], layer_type='basic'),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[2_56, 5_12, 10_24, 20_48], layer_type='bottleneck'),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[2_56, 5_12, 10_24, 20_48], layer_type='bottleneck'),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[2_56, 5_12, 10_24, 20_48], layer_type='bottleneck'),
    }
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
UpperCAmelCase_ : Tuple = parser.parse_args()
UpperCAmelCase_ : int = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
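# A hypothetical command line for the script above (the filename is an
# assumption); only --pytorch_dump_folder_path is required:
#
#   python convert_resnet_to_pytorch.py --model_name resnet50 --pytorch_dump_folder_path ./converted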
| 570
|
import os
_lowercase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}
def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman numeral string (possibly non-minimal) to its integer value."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    """Generate the minimal-length Roman numeral form of an integer."""
    numerals = ""
    m_count = num // 10_00
    numerals += m_count * "M"
    num %= 10_00
    c_count = num // 1_00
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 1_00
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Project Euler 89: total characters saved by rewriting each numeral minimally."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)
    return savings
if __name__ == "__main__":
print(f"{solution() = }")
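if __name__ == "__main__":
    # A quick round-trip check of the helpers above: "XVIIII" (a non-minimal
    # form of 19) parses to 19 and regenerates as the minimal form "XIX".
    assert parse_roman_numerals("XVIIII") == 19
    assert generate_roman_numerals(19) == "XIX"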
| 659
| 0
|
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}
# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 546
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()
    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")
    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)
        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}
        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)
        if include_res_hidden_states_tuple:
            res_generator = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=res_generator, device=device),)
        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)
        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor((batch_size, 3) + sizes, generator=generator, device=device)
        return dummy_input
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            init_dict.pop("out_channels")
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()
        with torch.no_grad():
            output = unet_block(**inputs_dict)
        if isinstance(output, Tuple):
            output = output[0]
        self.assertEqual(output.shape, self.output_shape)
        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)
    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)
        if isinstance(output, Tuple):
            output = output[0]
        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
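# Usage sketch (added, not part of the original file): a concrete test class supplies
# `block_class` and `block_type`; `DownBlock2D` is a real diffusers block, but the slice
# values a real test would pass to test_output are placeholders here:
#
#     from diffusers.models.unet_2d_blocks import DownBlock2D
#
#     class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
#         block_class = DownBlock2D
#         block_type = "down"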
| 546
| 1
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowercase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Optional[int] = XLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
UpperCamelCase : Any = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
UpperCamelCase : Any = torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim
UpperCamelCase : Tuple = torch.tensor(
[[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCamelCase : Optional[int] = model(__snake_case )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , __snake_case )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __snake_case , atol=1e-3 ) )
@slow
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""" )
UpperCamelCase : Optional[Any] = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
UpperCamelCase : Any = torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim
UpperCamelCase : Tuple = torch.tensor(
[[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCamelCase : Union[str, Any] = model(__snake_case )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , __snake_case )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __snake_case , atol=1e-3 ) )
| 102
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large-finetuned-conll02-dutch": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll02-spanish": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-english": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-german": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlm-roberta-base": 512,
    "xlm-roberta-large": 512,
    "xlm-roberta-large-finetuned-conll02-dutch": 512,
    "xlm-roberta-large-finetuned-conll02-spanish": 512,
    "xlm-roberta-large-finetuned-conll03-english": 512,
    "xlm-roberta-large-finetuned-conll03-german": 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |  7  |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | --- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
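# Minimal usage sketch (added, not part of the original module); loading the real
# "xlm-roberta-base" checkpoint downloads its sentencepiece model:
if __name__ == "__main__":
    tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    ids = tokenizer("Hello world")["input_ids"]  # wrapped as <s> (id 0) ... </s> (id 2)
    print(tokenizer.convert_ids_to_tokens(ids))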
| 166
| 0
|
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight
    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
    def get_value(self):
        return self.value
    def get_name(self):
        return self.name
    def get_weight(self):
        return self.weight
    def value_weight(self):
        return self.value / self.weight
def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu
def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def test_greedy():
    """The doctest body was stripped from this copy of the file; it exercised greedy() on a sample menu."""
if __name__ == "__main__":
    import doctest
    doctest.testmod()
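# Worked example (added for illustration, not in the original):
if __name__ == "__main__":
    foods = build_menu(["Burger", "Pizza", "Cola"], [80, 100, 60], [40, 60, 10])
    chosen, value = greedy(foods, max_cost=60, key_func=Things.value_weight)
    # Sorted by value/weight: Cola (6.0), Burger (2.0), Pizza (~1.67).
    # Cola (weight 10) + Burger (weight 40) fit within 60, so value == 140.0.
    print(chosen, value)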
| 160
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
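# Note on the design (added): replacing the module object with a _LazyModule means
# `from transformers.models.xlm import XLMModel` only imports modeling_xlm the first
# time the attribute is accessed, so a plain `import transformers` stays cheap even
# though _import_structure lists every symbol up front.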
| 160
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowercase__ :Tuple = TypeVar('T')
class snake_case ( Generic[T] ):
'''simple docstring'''
def __init__( self : Optional[int] , __lowercase : T ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = data
__UpperCAmelCase : Node[T] | None = None
def __str__( self : int ):
'''simple docstring'''
return f'''{self.data}'''
class snake_case ( Generic[T] ):
'''simple docstring'''
def __init__( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Node[T] | None = None
def __iter__( self : int ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.top
while node:
yield node.data
__UpperCAmelCase : Dict = node.next
def __str__( self : Any ):
'''simple docstring'''
return "->".join([str(__lowercase ) for item in self] )
def __len__( self : int ):
'''simple docstring'''
return len(tuple(iter(self ) ) )
def A_ ( self : Tuple ):
'''simple docstring'''
return self.top is None
def A_ ( self : List[str] , __lowercase : T ):
'''simple docstring'''
__UpperCAmelCase : int = Node(__lowercase )
if not self.is_empty():
__UpperCAmelCase : int = self.top
__UpperCAmelCase : Tuple = node
def A_ ( self : List[str] ):
'''simple docstring'''
if self.is_empty():
raise IndexError('''pop from empty stack''' )
assert isinstance(self.top , __lowercase )
__UpperCAmelCase : List[str] = self.top
__UpperCAmelCase : List[str] = self.top.next
return pop_node.data
def A_ ( self : str ):
'''simple docstring'''
if self.is_empty():
raise IndexError('''peek from empty stack''' )
assert self.top is not None
return self.top.data
def A_ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : str = None
if __name__ == "__main__":
from doctest import testmod
testmod()
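# Quick demo (added for illustration, not in the original):
if __name__ == "__main__":
    stack = LinkedStack[int]()
    stack.push(1)
    stack.push(2)
    stack.push(3)
    print(stack)         # 3->2->1  (top of the stack prints first)
    print(stack.pop())   # 3
    print(stack.peek())  # 2
    print(len(stack))    # 2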
| 522
| 1
|
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text_1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_1) and not rabin_karp(pattern, text_2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
    test_rabin_karp()
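# Worked rolling-hash example (added for illustration): for a pattern of length 2,
# modulus_power == alphabet_size, and sliding the window one position subtracts the
# leading character out and shifts the trailing one in:
if __name__ == "__main__":
    h_ab = (ord("a") * alphabet_size + ord("b")) % modulus
    h_bc = ((h_ab - ord("a") * alphabet_size) * alphabet_size + ord("c")) % modulus
    assert h_bc == (ord("b") * alphabet_size + ord("c")) % modulus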
| 713
|
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))
def generate_key(message: str, key: str) -> str:
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key
def cipher_text(message: str, key_new: str) -> str:
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            encrypted += dict2[x]
    return encrypted
def original_text(cipher_txt: str, key_new: str) -> str:
    or_txt = ""
    i = 0
    for letter in cipher_txt:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt
def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
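# Worked example (added): "THE GERMAN ATTACK" has 17 characters, so the 6-letter key
# "SECRET" is cycled out to 17 characters before encryption:
if __name__ == "__main__":
    assert generate_key("THE GERMAN ATTACK", "SECRET") == "SECRETSECRETSECRE"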
| 675
| 0
|
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase_ : List[Any] = """platform"""
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def _A (__a , __a , __a=None , __a=None , __a=None , __a=None , __a=None , __a=None , ) -> Optional[Any]:
"""simple docstring"""
if attention_mask is None:
SCREAMING_SNAKE_CASE_ : Any = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE_ : List[Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
SCREAMING_SNAKE_CASE_ : Optional[int] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE_ : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : str , lowercase_ : int , lowercase_ : Any=13 , lowercase_ : Any=7 , lowercase_ : Any=True , lowercase_ : Any=False , lowercase_ : Union[str, Any]=99 , lowercase_ : Optional[Any]=16 , lowercase_ : str=2 , lowercase_ : Dict=4 , lowercase_ : int=4 , lowercase_ : int="gelu" , lowercase_ : str=0.1 , lowercase_ : str=0.1 , lowercase_ : List[str]=32 , lowercase_ : Any=2 , lowercase_ : Optional[int]=1 , lowercase_ : Tuple=0 , lowercase_ : Optional[Any]=0.02 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Any = batch_size
SCREAMING_SNAKE_CASE_ : List[str] = seq_length
SCREAMING_SNAKE_CASE_ : Tuple = is_training
SCREAMING_SNAKE_CASE_ : List[Any] = use_labels
SCREAMING_SNAKE_CASE_ : int = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE_ : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[str] = intermediate_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Optional[Any] = eos_token_id
SCREAMING_SNAKE_CASE_ : Any = pad_token_id
SCREAMING_SNAKE_CASE_ : str = bos_token_id
SCREAMING_SNAKE_CASE_ : Optional[int] = initializer_range
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) , 3 , self.vocab_size)
SCREAMING_SNAKE_CASE_ : Any = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa)) , -1)
SCREAMING_SNAKE_CASE_ : List[Any] = shift_tokens_right(lowercase_ , 1 , 2)
SCREAMING_SNAKE_CASE_ : str = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowercase_ , )
SCREAMING_SNAKE_CASE_ : Optional[int] = prepare_blenderbot_inputs_dict(lowercase_ , lowercase_ , lowercase_)
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : str , lowercase_ : int , lowercase_ : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = 20
SCREAMING_SNAKE_CASE_ : Tuple = model_class_name(lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = model.encode(inputs_dict['''input_ids'''])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
SCREAMING_SNAKE_CASE_ : Tuple = model.init_cache(decoder_input_ids.shape[0] , lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : str = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''')
SCREAMING_SNAKE_CASE_ : List[str] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
SCREAMING_SNAKE_CASE_ : Optional[int] = model.decode(
decoder_input_ids[:, :-1] , lowercase_ , decoder_attention_mask=lowercase_ , past_key_values=lowercase_ , decoder_position_ids=lowercase_ , )
SCREAMING_SNAKE_CASE_ : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''')
SCREAMING_SNAKE_CASE_ : Optional[Any] = model.decode(
decoder_input_ids[:, -1:] , lowercase_ , decoder_attention_mask=lowercase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase_ , )
SCREAMING_SNAKE_CASE_ : Optional[int] = model.decode(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}')
def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : int , lowercase_ : int , lowercase_ : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = 20
SCREAMING_SNAKE_CASE_ : List[str] = model_class_name(lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = model.encode(inputs_dict['''input_ids'''])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
SCREAMING_SNAKE_CASE_ : Tuple = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
SCREAMING_SNAKE_CASE_ : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
SCREAMING_SNAKE_CASE_ : Tuple = model.decode(
decoder_input_ids[:, :-1] , lowercase_ , decoder_attention_mask=lowercase_ , past_key_values=lowercase_ , decoder_position_ids=lowercase_ , )
SCREAMING_SNAKE_CASE_ : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''')
SCREAMING_SNAKE_CASE_ : int = model.decode(
decoder_input_ids[:, -1:] , lowercase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowercase_ , decoder_position_ids=lowercase_ , )
SCREAMING_SNAKE_CASE_ : str = model.decode(lowercase_ , lowercase_ , decoder_attention_mask=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}')
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = 9_9
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = input_ids.shape[0]
SCREAMING_SNAKE_CASE_ : Tuple = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self._get_config_and_data()
SCREAMING_SNAKE_CASE_ : str = FlaxBlenderbotForConditionalGeneration(lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = lm_model(input_ids=lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = FlaxBlenderbotForConditionalGeneration(lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa)
SCREAMING_SNAKE_CASE_ : int = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = lm_model(input_ids=lowercase_ , decoder_input_ids=lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa)
SCREAMING_SNAKE_CASE_ : Dict = shift_tokens_right(lowercase_ , 1 , 2)
SCREAMING_SNAKE_CASE_ : Dict = np.equal(lowercase_ , 1).astype(np.floataa).sum()
SCREAMING_SNAKE_CASE_ : str = np.equal(lowercase_ , 1).astype(np.floataa).sum()
self.assertEqual(shifted.shape , input_ids.shape)
self.assertEqual(lowercase_ , n_pad_before - 1)
self.assertTrue(np.equal(shifted[:, 0] , 2).all())
@require_flax
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase , UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = True
__UpperCamelCase = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
__UpperCamelCase = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = FlaxBlenderbotModelTester(self)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowercase_ , lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowercase_ , lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
SCREAMING_SNAKE_CASE_ : Dict = self._prepare_for_class(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : int = model_class(lowercase_)
@jax.jit
def encode_jitted(lowercase_ : Optional[int] , lowercase_ : Any=None , **lowercase_ : Dict):
return model.encode(input_ids=lowercase_ , attention_mask=lowercase_)
with self.subTest('''JIT Enabled'''):
SCREAMING_SNAKE_CASE_ : Dict = encode_jitted(**lowercase_).to_tuple()
with self.subTest('''JIT Disabled'''):
with jax.disable_jit():
SCREAMING_SNAKE_CASE_ : Tuple = encode_jitted(**lowercase_).to_tuple()
self.assertEqual(len(lowercase_) , len(lowercase_))
for jitted_output, output in zip(lowercase_ , lowercase_):
self.assertEqual(jitted_output.shape , output.shape)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
SCREAMING_SNAKE_CASE_ : int = model_class(lowercase_)
SCREAMING_SNAKE_CASE_ : int = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''])
SCREAMING_SNAKE_CASE_ : Optional[int] = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(lowercase_ : List[Any] , lowercase_ : int , lowercase_ : int):
return model.decode(
decoder_input_ids=lowercase_ , decoder_attention_mask=lowercase_ , encoder_outputs=lowercase_ , )
with self.subTest('''JIT Enabled'''):
SCREAMING_SNAKE_CASE_ : Dict = decode_jitted(**lowercase_).to_tuple()
with self.subTest('''JIT Disabled'''):
with jax.disable_jit():
SCREAMING_SNAKE_CASE_ : int = decode_jitted(**lowercase_).to_tuple()
self.assertEqual(len(lowercase_) , len(lowercase_))
for jitted_output, output in zip(lowercase_ , lowercase_):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : List[str] = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''')
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
SCREAMING_SNAKE_CASE_ : Optional[int] = np.ones((1, 1)) * model.config.eos_token_id
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(lowercase_)
self.assertIsNotNone(lowercase_)
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''')
@slow
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
SCREAMING_SNAKE_CASE_ : int = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
SCREAMING_SNAKE_CASE_ : Dict = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''')
SCREAMING_SNAKE_CASE_ : str = ['''Sam''']
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer(lowercase_ , return_tensors='''jax''')
SCREAMING_SNAKE_CASE_ : List[str] = model.generate(**lowercase_ , **lowercase_)
SCREAMING_SNAKE_CASE_ : Any = '''Sam is a great name. It means "sun" in Gaelic.'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.batch_decode(lowercase_ , **lowercase_)
assert generated_txt[0].strip() == tgt_text
| 512
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCAmelCase__ ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = ["torch", "torchsde"]
def __init__( self : Dict , *lowercase_ : Tuple , **lowercase_ : Dict):
'''simple docstring'''
requires_backends(self , ['''torch''', '''torchsde'''])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] , *lowercase_ : int , **lowercase_ : Optional[Any]):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''torchsde'''])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] , *lowercase_ : List[Any] , **lowercase_ : Tuple):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''torchsde'''])
| 512
| 1
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)
        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )
    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)
        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
| 377
|
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]
def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index
def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
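# Worked check (added): this is Project Euler 25. F(12) = 144 is the first Fibonacci
# number with 3 digits, and the first with 1000 digits is F(4782), so
# solution(1000) == 4782.
if __name__ == "__main__":
    assert solution(3) == 12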
| 377
| 1
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # Only the local main process renders a progress bar; all other ranks disable it.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
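# Usage sketch (added): behaves like tqdm.auto.tqdm, but under e.g. `accelerate launch`
# with 4 processes only local rank 0 shows a bar. Note the positional signature: the
# iterable must come after the main_process_only flag.
#
#     for batch in tqdm(True, dataloader, desc="train"):
#         ...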
| 99
|
class DisjointSet:
    """Union-find over weighted sets; tracks the size of the largest set."""
    def __init__(self, set_counts):
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))
    def merge(self, src, dst):
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True
    def get_parent(self, disj_set):
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])  # path compression
        return self.parents[disj_set]
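# Quick demo (added): three sets of sizes [1, 1, 2]; merging 0 into 1 and then 1 into 2
# leaves a single set of size 4.
if __name__ == "__main__":
    ds = DisjointSet([1, 1, 2])
    ds.merge(0, 1)
    ds.merge(1, 2)
    assert ds.max_set == 4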
| 99
| 1
|
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase : List[str] = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
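# Worked example (added): with the default movq scale factor of 8, a 768x768 request
# maps to a 96x96 latent grid: 768 // 8**2 == 12 with no remainder, and 12 * 8 == 96;
# a 770x770 request would round up to the same (12 + 1) * 8 == 104 grid in each axis.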
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 700
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCamelCase : str = False
class __snake_case( unittest.TestCase ):
def A ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A ( self ):
'''simple docstring'''
return 12
@property
def A ( self ):
'''simple docstring'''
return 12
@property
def A ( self ):
'''simple docstring'''
return 32
@property
def A ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def A ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(A_ )
@property
def A ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = 12
_SCREAMING_SNAKE_CASE = 12
_SCREAMING_SNAKE_CASE = {
'''attention_bias''': True,
'''cross_attention_dim''': 32,
'''attention_head_dim''': height * width,
'''num_attention_heads''': 1,
'''num_vector_embeds''': self.num_embed,
'''num_embeds_ada_norm''': self.num_embeds_ada_norm,
'''norm_num_groups''': 32,
'''sample_size''': width,
'''activation_fn''': '''geglu-approximate''',
}
_SCREAMING_SNAKE_CASE = TransformeraDModel(**A_ )
return model
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = '''cpu'''
_SCREAMING_SNAKE_CASE = self.dummy_vqvae
_SCREAMING_SNAKE_CASE = self.dummy_text_encoder
_SCREAMING_SNAKE_CASE = self.dummy_tokenizer
_SCREAMING_SNAKE_CASE = self.dummy_transformer
_SCREAMING_SNAKE_CASE = VQDiffusionScheduler(self.num_embed )
_SCREAMING_SNAKE_CASE = LearnedClassifierFreeSamplingEmbeddings(learnable=A_ )
_SCREAMING_SNAKE_CASE = VQDiffusionPipeline(
vqvae=A_ , text_encoder=A_ , tokenizer=A_ , transformer=A_ , scheduler=A_ , learned_classifier_free_sampling_embeddings=A_ , )
_SCREAMING_SNAKE_CASE = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_SCREAMING_SNAKE_CASE = '''teddy bear playing in the pool'''
_SCREAMING_SNAKE_CASE = torch.Generator(device=A_ ).manual_seed(0 )
_SCREAMING_SNAKE_CASE = pipe([prompt] , generator=A_ , num_inference_steps=2 , output_type='''np''' )
_SCREAMING_SNAKE_CASE = output.images
_SCREAMING_SNAKE_CASE = torch.Generator(device=A_ ).manual_seed(0 )
_SCREAMING_SNAKE_CASE = pipe(
[prompt] , generator=A_ , output_type='''np''' , return_dict=A_ , num_inference_steps=2 )[0]
_SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
_SCREAMING_SNAKE_CASE = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = '''cpu'''
_SCREAMING_SNAKE_CASE = self.dummy_vqvae
_SCREAMING_SNAKE_CASE = self.dummy_text_encoder
_SCREAMING_SNAKE_CASE = self.dummy_tokenizer
_SCREAMING_SNAKE_CASE = self.dummy_transformer
_SCREAMING_SNAKE_CASE = VQDiffusionScheduler(self.num_embed )
_SCREAMING_SNAKE_CASE = LearnedClassifierFreeSamplingEmbeddings(
learnable=A_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
_SCREAMING_SNAKE_CASE = VQDiffusionPipeline(
vqvae=A_ , text_encoder=A_ , tokenizer=A_ , transformer=A_ , scheduler=A_ , learned_classifier_free_sampling_embeddings=A_ , )
_SCREAMING_SNAKE_CASE = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_SCREAMING_SNAKE_CASE = '''teddy bear playing in the pool'''
_SCREAMING_SNAKE_CASE = torch.Generator(device=A_ ).manual_seed(0 )
_SCREAMING_SNAKE_CASE = pipe([prompt] , generator=A_ , num_inference_steps=2 , output_type='''np''' )
_SCREAMING_SNAKE_CASE = output.images
_SCREAMING_SNAKE_CASE = torch.Generator(device=A_ ).manual_seed(0 )
_SCREAMING_SNAKE_CASE = pipe(
[prompt] , generator=A_ , output_type='''np''' , return_dict=A_ , num_inference_steps=2 )[0]
_SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
_SCREAMING_SNAKE_CASE = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __snake_case( unittest.TestCase ):
    def tearDown(self):
        '''simple docstring'''
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_vq_diffusion_classifier_free_sampling(self):
        '''simple docstring'''
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''')

        pipeline = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''')
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            '''teddy bear playing in the pool''', num_images_per_prompt=1, generator=generator, output_type='''np''', )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
| 168
| 0
|
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """
    Returns the prime factors of n as a list, in non-decreasing order.

    >>> prime_factors(100)
    [2, 2, 5, 5]
    >>> prime_factors(2560)
    [2, 2, 2, 2, 2, 2, 2, 2, 2, 5]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
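# Two additional usage checks for prime_factors (illustrative, not part of the
# original module): a prime returns itself and 1 has no prime factors.
assert prime_factors(97) == [97]
assert prime_factors(1) == []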
| 605
|
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, 'tf', 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, 'pt', 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
        with NamedTemporaryFile(mode='w+t') as vocab_file:
            vocab_file.write('\n'.join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

            with TemporaryDirectory() as bert_save_dir:
                model = BertModel(BertConfig(vocab_size=len(vocab)))
                model.save_pretrained(bert_save_dir)
                self._test_export(bert_save_dir, 'pt', 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, 'tf', 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail('Quantized model is bigger than initial ONNX model')

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, 'pt', 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail('Quantized model is bigger than initial ONNX model')

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath('model.onnx')

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random'))
        tokenizer = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random')
        self._test_infer_dynamic_axis(model, tokenizer, 'pt')

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random'))
        tokenizer = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random')
        self._test_infer_dynamic_axis(model, tokenizer, 'tf')

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: 'batch', 1: 'sequence'})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes['output_0'], {0: 'batch', 1: 'sequence'})
        self.assertDictEqual(shapes['output_1'], {0: 'batch'})

    def test_ensure_valid_input(self):
        valid_input_names = ['input_ids', 'attention_mask', 'token_type_ids']
        tokens = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, valid_input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(valid_input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, valid_input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens['input_ids'])
        self.assertEqual(ordered_input_names[0], 'input_ids')

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path('/home/something/my_fake_model.onnx'), '-test')
        self.assertEqual('/home/something/my_fake_model-test.onnx', generated.as_posix())
| 605
| 1
|
'''simple docstring'''
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print('''f(x) = x^3 + x^2''')
    print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
    i = 10
    while i <= 10_00_00:
        print(f"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
        i *= 10
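# A minimal convergence check (illustrative, not part of the original module):
# the exact area under x^2 on [0, 1] is 1/3, and the trapezoidal estimate
# should approach it as the number of steps grows.
assert abs(trapezoidal_area(lambda x: x * x, 0, 1, 10_000) - 1 / 3) < 1e-4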
| 420
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    '''configuration_graphormer''': ['''GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GraphormerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_graphormer'''] = [
'''GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GraphormerForGraphClassification''',
'''GraphormerModel''',
'''GraphormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
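# For illustration only: a stripped-down sketch of the lazy-module idea used
# above (this is an assumption about the pattern, not the real _LazyModule
# code). A submodule is imported only when one of its attributes is first
# accessed, which keeps `import transformers.models.graphormer` cheap.
import importlib


class LazyModuleSketch:
    def __init__(self, package_name: str, import_structure: dict):
        self._package = package_name
        # Map every exported attribute back to the submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        # First access triggers the real import of just that one submodule
        module = importlib.import_module(f".{self._attr_to_module[attr]}", self._package)
        return getattr(module, attr)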
| 420
| 1
|
from pathlib import Path

import numpy as np
from PIL import Image


def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Convert an RGB image to grayscale using the standard luma weights."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2_989 * r + 0.5_870 * g + 0.1_140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image into a boolean mask."""
    return (gray > 1_27) & (gray <= 2_55)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Morphological dilation of a binary image with the given kernel."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )

    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))

    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)

    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
| 419
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split("""-""")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == """gated"""
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
                """'gated-gelu' or 'relu'""")
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = """gelu_new"""
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs, )
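# Quick illustration of the sparse-layer bookkeeping above (assumed usage):
# with the defaults of 12 layers and 3 sparse layers per stack, every 4th
# encoder/decoder block is a sparse (mixture-of-experts) layer.
config = SwitchTransformersConfig()
assert config.encoder_sparse_step == 4
assert config.decoder_sparse_step == 4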
| 419
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}


class EfficientNetConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        """simple docstring"""
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])

    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1e-5
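# Sanity check of the derived attribute above (illustrative usage): with the
# default block repeats [1, 2, 2, 3, 3, 4, 1], num_hidden_layers is 16 * 4.
config = EfficientNetConfig()
assert config.num_hidden_layers == sum([1, 2, 2, 3, 3, 4, 1]) * 4 == 64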
| 610
|
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    """vocab_file""": """vocab.json""",
    """merges_file""": """merges.txt""",
    """tokenizer_config_file""": """tokenizer_config.json""",
}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
    },
    """merges_file""": {
        """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
    },
    """tokenizer_config_file""": {
        """facebook/blenderbot_small-90M""": (
            """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """facebook/blenderbot_small-90M""": 512,
}


class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file, merges=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, ), bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs, )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 610
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __a ( __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
_A : int = KandinskyVaaInpaintPipeline
_A : Tuple = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
_A : Any = [
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
_A : Any = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_A : Optional[int] = False
@property
    def text_embedder_hidden_size(self):
        '''simple docstring'''
        return 32

    @property
    def time_input_dim(self):
        '''simple docstring'''
        return 32

    @property
    def block_out_channels_0(self):
        '''simple docstring'''
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        '''simple docstring'''
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        '''simple docstring'''
        return 100
@property
    def dummy_unet(self):
        '''simple docstring'''
        torch.manual_seed(0)

        model_kwargs = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        model = UNetaDConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
'''simple docstring'''
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        '''simple docstring'''
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="""linear""", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="""epsilon""", thresholding=False, )

        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("""RGB""").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("""mps"""):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            """image""": init_image,
            """mask_image""": mask,
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """generator""": generator,
            """height""": 64,
            """width""": 64,
            """num_inference_steps""": 2,
            """guidance_scale""": 4.0,
            """output_type""": """np""",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        '''simple docstring'''
        device = """cpu"""

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"""image.shape {image.shape}""")
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
    def test_inference_batch_single_identical(self):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __a ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        '''simple docstring'''
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""")

        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""")
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = """a hat"""

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaInpaintPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-decoder-inpaint""", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="""cpu""").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="""""", ).to_tuple()

        output = pipeline(
            image=init_image, mask_image=mask, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, output_type="""np""", )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 151
|
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the simple paths from the top-left to the bottom-right corner of
    grid, moving in the four cardinal directions and avoiding blocked cells (1s)."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
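# Example (illustrative usage): a fully open 2x2 grid has exactly two simple
# paths from the top-left to the bottom-right corner (right-down and down-right).
assert depth_first_search([[0, 0], [0, 0]], 0, 0, set()) == 2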
| 151
| 1
|
"""simple docstring"""
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = '''Hello, World!'''
SAMPLE_LANGUAGE = '''en_XX'''
def lowerCAmelCase_( lowercase_ : str , lowercase_ : str , lowercase_ : bool ) -> List[str]:
_lowerCamelCase = Path('''data_bin''' )
_lowerCamelCase = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(lowercase_ ).parent ) , checkpoint_file=Path(lowercase_ ).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(lowercase_ ) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(lowercase_ ).parent / '''sentencepiece.bpe.model''' ) , src_dict=str(data_dir / '''dict.txt''' ) , )
xmod.eval() # disable dropout
print(lowercase_ )
_lowerCamelCase = xmod.model.encoder.sentence_encoder
_lowerCamelCase = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
_lowerCamelCase = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''' , lowercase_ )
_lowerCamelCase = XmodForSequenceClassification(lowercase_ ) if classification_head else XmodForMaskedLM(lowercase_ )
model.eval()
# Now let's copy all the weights.
# Embeddings
_lowerCamelCase = xmod_sent_encoder.embed_tokens.weight
_lowerCamelCase = xmod_sent_encoder.embed_positions.weight
_lowerCamelCase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
_lowerCamelCase = xmod_sent_encoder.layernorm_embedding.weight
_lowerCamelCase = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
_lowerCamelCase = model.roberta.encoder.layer[i]
_lowerCamelCase = xmod_sent_encoder.layers[i]
# self attention
_lowerCamelCase = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('''Dimensions of self-attention weights do not match.''' )
_lowerCamelCase = xmod_layer.self_attn.q_proj.weight
_lowerCamelCase = xmod_layer.self_attn.q_proj.bias
_lowerCamelCase = xmod_layer.self_attn.k_proj.weight
_lowerCamelCase = xmod_layer.self_attn.k_proj.bias
_lowerCamelCase = xmod_layer.self_attn.v_proj.weight
_lowerCamelCase = xmod_layer.self_attn.v_proj.bias
# self-attention output
_lowerCamelCase = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''' )
_lowerCamelCase = xmod_layer.self_attn.out_proj.weight
_lowerCamelCase = xmod_layer.self_attn.out_proj.bias
_lowerCamelCase = xmod_layer.self_attn_layer_norm.weight
_lowerCamelCase = xmod_layer.self_attn_layer_norm.bias
# intermediate
_lowerCamelCase = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''' )
_lowerCamelCase = xmod_layer.fca.weight
_lowerCamelCase = xmod_layer.fca.bias
# output
_lowerCamelCase = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
_lowerCamelCase = xmod_layer.fca.weight
_lowerCamelCase = xmod_layer.fca.bias
_lowerCamelCase = xmod_layer.final_layer_norm.weight
_lowerCamelCase = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
_lowerCamelCase = xmod_layer.adapter_layer_norm.weight
_lowerCamelCase = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('''Lists of language adapters do not match.''' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
_lowerCamelCase = bert_output.adapter_modules[lang_code]
_lowerCamelCase = xmod_layer.adapter_modules[lang_code]
_lowerCamelCase = from_adapter.fca.weight
_lowerCamelCase = from_adapter.fca.bias
_lowerCamelCase = from_adapter.fca.weight
_lowerCamelCase = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
_lowerCamelCase = xmod_sent_encoder.layer_norm.weight
_lowerCamelCase = xmod_sent_encoder.layer_norm.bias
if classification_head:
_lowerCamelCase = xmod.model.classification_heads['''mnli'''].dense.weight
_lowerCamelCase = xmod.model.classification_heads['''mnli'''].dense.bias
_lowerCamelCase = xmod.model.classification_heads['''mnli'''].out_proj.weight
_lowerCamelCase = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
_lowerCamelCase = xmod.model.encoder.lm_head.dense.weight
_lowerCamelCase = xmod.model.encoder.lm_head.dense.bias
_lowerCamelCase = xmod.model.encoder.lm_head.layer_norm.weight
_lowerCamelCase = xmod.model.encoder.lm_head.layer_norm.bias
_lowerCamelCase = xmod.model.encoder.lm_head.weight
_lowerCamelCase = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
_lowerCamelCase = xmod.encode(lowercase_ ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(lowercase_ )
_lowerCamelCase = model(lowercase_ )[0]
if classification_head:
_lowerCamelCase = xmod.model.classification_heads['''mnli'''](xmod.extract_features(lowercase_ ) )
else:
_lowerCamelCase = xmod.model(lowercase_ , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
_lowerCamelCase = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
_lowerCamelCase = torch.allclose(lowercase_ , lowercase_ , atol=1e-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
Path(lowercase_ ).mkdir(parents=lowercase_ , exist_ok=lowercase_ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 623
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
def lowerCAmelCase_( lowercase_ : list[Any] ) -> None:
create_state_space_tree(lowercase_ , [] , 0 )
def lowerCAmelCase_( lowercase_ : list[Any] , lowercase_ : list[Any] , lowercase_ : int ) -> None:
if index == len(lowercase_ ):
print(lowercase_ )
return
create_state_space_tree(lowercase_ , lowercase_ , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(lowercase_ , lowercase_ , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
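# Expected behaviour (illustrative): each element is either skipped or taken,
# so an n-element sequence prints 2**n subsequences. For [1, 2] the call below
# prints, in this order: [], [2], [1], [1, 2].
generate_all_subsequences([1, 2])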
| 623
| 1
|
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
        f'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
        f'''and 50 > y > - 5 found via hill climbing: {local_max.score()}'''
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        'The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
        f'''{local_min.score()}'''
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        'The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
        f'''{local_max.score()}'''
    )
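# Numeric illustration of the acceptance rule above (not part of the original
# module): a candidate that worsens the score by `change` is still accepted
# with probability e ** (change / T), so bad moves are likelier at high
# temperature and nearly impossible once the system has cooled.
for temperature in (100, 10, 1):
    probability = math.e ** (-5 / temperature)  # a candidate that is worse by 5
    print(f"T={temperature}: accept with p ~ {probability:.3f}")
# T=100 -> ~0.951, T=10 -> ~0.607, T=1 -> ~0.007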
| 311
|
import math
import os
import sys


def read_file_binary(file_path: str) -> str:
    """Reads the given file as bytes and returns them as one long bit string."""
    result = ''
    try:
        with open(file_path, 'rb') as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print('File not accessible')
        sys.exit()


def add_key_to_lexicon(lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str) -> None:
    """Adds the new phrases (curr_string + "0", curr_string + "1") to the lexicon."""
    lexicon.pop(curr_string)
    lexicon[curr_string + '0'] = last_match_id

    if math.log2(index).is_integer():
        # code length grew by one bit; left-pad every existing code
        for curr_key in lexicon:
            lexicon[curr_key] = '0' + lexicon[curr_key]

    lexicon[curr_string + '1'] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compresses the given bit string using the Lempel-Ziv algorithm."""
    lexicon = {'0': '0', '1': '1'}
    result, curr_string = '', ''
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ''

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prepends the source file length (in a self-delimiting binary form)."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Writes the given bit string as padded bytes into the file."""
    byte_length = 8
    try:
        with open(file_path, 'wb') as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append('10000000')
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder='big'))
    except OSError:
        print('File not accessible')
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    """Reads the source file, compresses it and writes the result."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
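# Small illustration (assumed usage): compress_data can be exercised directly
# on a bit string, without the file round trip. Each emitted code is the
# lexicon entry of the longest phrase matched so far, and codes grow as the
# lexicon fills up.
print(compress_data("0010101"))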
| 417
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}


class RoCBertConfig(PretrainedConfig):
    """simple docstring"""

    model_type = '''roc_bert'''

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 706
|
import unittest

from knapsack import knapsack as k


class TestKnapsack(unittest.TestCase):
    """simple docstring"""

    def test_base_case(self):
        """simple docstring"""
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        """simple docstring"""
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        """simple docstring"""
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
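# The `knapsack` module under test is not included here; a minimal recursive
# 0/1 knapsack consistent with the expected values above would look like this
# (a sketch, not necessarily the implementation actually being tested):
def knapsack_sketch(capacity, weights, values, counter):
    # Base case: no items left or no remaining capacity
    if counter == 0 or capacity == 0:
        return 0
    # Item doesn't fit: skip it
    if weights[counter - 1] > capacity:
        return knapsack_sketch(capacity, weights, values, counter - 1)
    # Otherwise take the better of including or excluding the item
    return max(
        values[counter - 1] + knapsack_sketch(capacity - weights[counter - 1], weights, values, counter - 1),
        knapsack_sketch(capacity, weights, values, counter - 1),
    )


assert knapsack_sketch(50, [10, 20, 30], [60, 100, 120], 3) == 220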
| 298
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = KandinskyVaaControlnetImgaImgPipeline
SCREAMING_SNAKE_CASE__ : Optional[Any] = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
SCREAMING_SNAKE_CASE__ : List[str] = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
@property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
@property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_0, self.block_out_channels_0 * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        model = UNetaDConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            '''num_train_timesteps''': 1_000,
            '''beta_schedule''': '''linear''',
            '''beta_start''': 0.0_0085,
            '''beta_end''': 0.012,
            '''clip_sample''': False,
            '''set_alpha_to_one''': False,
            '''steps_offset''': 0,
            '''prediction_type''': '''epsilon''',
            '''thresholding''': False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('''RGB''').resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''image''': init_image,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''hint''': hint,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 10,
            '''guidance_scale''': 7.0,
            '''strength''': 0.2,
            '''output_type''': '''np''',
        }
        return inputs

    def test_kandinsky_controlnet_img2img(self):
        device = '''cpu'''

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.5498_5034, 0.5550_9365, 0.5256_1504, 0.557_0494, 0.559_3818, 0.526_3979, 0.5028_5643, 0.506_9846, 0.5119_6736])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
        ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''')

        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''')
        init_image = init_image.resize((512, 512))

        hint = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/hint_image_cat.png''')
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = '''A robot, 4k photo'''

        pipe_prior = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''', torch_dtype=torch.float16)
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-controlnet-depth''', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device='''cpu''').manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt, image=init_image, strength=0.85, generator=generator, negative_prompt='''''', ).to_tuple()

        output = pipeline(
            image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint, generator=generator, num_inference_steps=100, height=512, width=512, strength=0.5, output_type='''np''', )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 696
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_llama'] = ['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_llama_fast'] = ['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_llama'] = [
        'LlamaForCausalLM',
        'LlamaModel',
        'LlamaPreTrainedModel',
        'LlamaForSequenceClassification',
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 696
| 1
|
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
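# Shards a T5X/Flax SwitchTransformers checkpoint into PyTorch `.bin` shards of at
# most `max_shard_size` bytes, writing a `weight_map` index alongside them. Weights
# are streamed one tensorstore entry at a time, so the full model never has to fit
# in host memory at once.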
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
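# Split a flattened tensorstore spec key (e.g. ".../kvstore/path") into the real
# layer name, the remaining sub-key tuple, and the value stored under it.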
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)
        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"""-{len(sharded_state_dicts)+1:05d}-of-???.bin"""))
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"""-{len(sharded_state_dicts)+1:05d}-of-???.bin"""))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())
    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"""-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin""")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"""-{idx+1:05d}-of-???.bin"""))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)
    return metadata, index
if __name__ == "__main__":
_snake_case : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
_snake_case : Dict = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 421
|
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
_snake_case : List[Any] = get_tests_dir('fixtures/dummy-config.json')
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : List[str] ) -> List[str]:
__lowerCAmelCase = 0
def lowercase ( self : Dict ) -> Optional[Any]:
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec('transformers.models.auto' ) )
def lowercase ( self : Dict ) -> List[str]:
__lowerCAmelCase = AutoConfig.from_pretrained('bert-base-uncased' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : Optional[Any] ) -> List[str]:
__lowerCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : Optional[int] ) -> List[Any]:
__lowerCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : Union[str, Any] ) -> int:
__lowerCAmelCase = AutoConfig.for_model('roberta' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : List[Any] ) -> int:
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
__lowerCAmelCase = os.path.join(lowerCAmelCase_ , 'fake-roberta' )
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
with open(os.path.join(lowerCAmelCase_ , 'config.json' ) , 'w' ) as f:
f.write(json.dumps({} ) )
__lowerCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertEqual(type(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowercase ( self : Union[str, Any] ) -> List[Any]:
try:
AutoConfig.register('custom' , lowerCAmelCase_ )
# Wrong model type will raise an error
with self.assertRaises(lowerCAmelCase_ ):
AutoConfig.register('model' , lowerCAmelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCAmelCase_ ):
AutoConfig.register('bert' , lowerCAmelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
__lowerCAmelCase = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def lowercase ( self : Optional[int] ) -> Dict:
with self.assertRaisesRegex(
lowerCAmelCase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
__lowerCAmelCase = AutoConfig.from_pretrained('bert-base' )
def lowercase ( self : List[Any] ) -> Dict:
with self.assertRaisesRegex(
lowerCAmelCase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
__lowerCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ , revision='aaaaaa' )
def lowercase ( self : Union[str, Any] ) -> Optional[Any]:
with self.assertRaisesRegex(
lowerCAmelCase_ , 'hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.' , ):
__lowerCAmelCase = AutoConfig.from_pretrained('hf-internal-testing/no-config-test-repo' )
def lowercase ( self : str ) -> str:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowerCAmelCase_ ):
__lowerCAmelCase = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCAmelCase_ ):
__lowerCAmelCase = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=lowerCAmelCase_ )
__lowerCAmelCase = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=lowerCAmelCase_ )
self.assertEqual(config.__class__.__name__ , 'NewModelConfig' )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ , trust_remote_code=lowerCAmelCase_ )
self.assertEqual(reloaded_config.__class__.__name__ , 'NewModelConfig' )
def lowercase ( self : List[Any] ) -> List[str]:
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = """new-model"""
try:
AutoConfig.register('new-model' , lowerCAmelCase_ )
# If remote code is not set, the default is to use local
__lowerCAmelCase = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal' )
# If remote code is disabled, we load the local one.
__lowerCAmelCase = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=lowerCAmelCase_ )
self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal' )
# If remote is enabled, we load from the Hub
__lowerCAmelCase = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=lowerCAmelCase_ )
self.assertEqual(config.__class__.__name__ , 'NewModelConfig' )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 421
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __a( _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = ShapEImgaImgPipeline
lowerCAmelCase = ['''image''']
lowerCAmelCase = ['''image''']
lowerCAmelCase = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
lowerCAmelCase = False
@property
def a__ ( self ) -> Optional[int]:
return 32
@property
def a__ ( self ) -> str:
return 32
@property
def a__ ( self ) -> Any:
return self.time_input_dim * 4
@property
def a__ ( self ) -> Tuple:
return 8
@property
def a__ ( self ) -> Any:
torch.manual_seed(0 )
UpperCAmelCase_ : List[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size ,image_size=64 ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_channels=3 ,num_hidden_layers=5 ,patch_size=1 ,)
UpperCAmelCase_ : str = CLIPVisionModel(_SCREAMING_SNAKE_CASE )
return model
@property
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : List[Any] = CLIPImageProcessor(
crop_size=224 ,do_center_crop=_SCREAMING_SNAKE_CASE ,do_normalize=_SCREAMING_SNAKE_CASE ,do_resize=_SCREAMING_SNAKE_CASE ,image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] ,image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] ,resample=3 ,size=224 ,)
return image_processor
@property
def a__ ( self ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
UpperCAmelCase_ : str = PriorTransformer(**_SCREAMING_SNAKE_CASE )
return model
@property
def a__ ( self ) -> Optional[int]:
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
UpperCAmelCase_ : List[str] = ShapERenderer(**_SCREAMING_SNAKE_CASE )
return model
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : str = self.dummy_prior
UpperCAmelCase_ : str = self.dummy_image_encoder
UpperCAmelCase_ : Union[str, Any] = self.dummy_image_processor
UpperCAmelCase_ : Tuple = self.dummy_renderer
UpperCAmelCase_ : Optional[Any] = HeunDiscreteScheduler(
beta_schedule='''exp''' ,num_train_timesteps=1_024 ,prediction_type='''sample''' ,use_karras_sigmas=_SCREAMING_SNAKE_CASE ,clip_sample=_SCREAMING_SNAKE_CASE ,clip_sample_range=1.0 ,)
UpperCAmelCase_ : Optional[int] = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=0 ) -> Any:
UpperCAmelCase_ : List[Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
if str(_SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
UpperCAmelCase_ : int = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : List[str] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : List[str] = '''cpu'''
UpperCAmelCase_ : int = self.get_dummy_components()
UpperCAmelCase_ : Union[str, Any] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : Optional[Any] = output.images[0]
UpperCAmelCase_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
UpperCAmelCase_ : Dict = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a__ ( self ) -> int:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = torch_device == '''cpu'''
UpperCAmelCase_ : List[str] = True
self._test_inference_batch_single_identical(
batch_size=2 ,test_max_difference=_SCREAMING_SNAKE_CASE ,relax_max_difference=_SCREAMING_SNAKE_CASE ,)
def a__ ( self ) -> str:
UpperCAmelCase_ : List[str] = self.get_dummy_components()
UpperCAmelCase_ : Any = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = 1
UpperCAmelCase_ : Union[str, Any] = 2
UpperCAmelCase_ : Optional[int] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
for key in inputs.keys():
if key in self.batch_params:
UpperCAmelCase_ : int = batch_size * [inputs[key]]
UpperCAmelCase_ : int = pipe(**_SCREAMING_SNAKE_CASE ,num_images_per_prompt=_SCREAMING_SNAKE_CASE )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ) -> str:
UpperCAmelCase_ : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
UpperCAmelCase_ : Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
UpperCAmelCase_ : str = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
UpperCAmelCase_ : Dict = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
UpperCAmelCase_ : str = pipe(
_SCREAMING_SNAKE_CASE ,generator=_SCREAMING_SNAKE_CASE ,guidance_scale=3.0 ,num_inference_steps=64 ,frame_size=64 ,output_type='''np''' ,).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
| 30
|
'''
A Hamiltonian cycle visits every vertex of a graph exactly once and returns to
the starting vertex. This module finds one (if it exists) by backtracking over
an adjacency-matrix representation of the graph.
'''


def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that current and next vertices are adjacent
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case: every vertex has been placed
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
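# A minimal sanity check (the 5-vertex adjacency matrix below is an illustrative
# example, not part of the original module):
if __name__ == "__main__":
    demo_graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    # Expected output: a cycle such as [0, 1, 2, 4, 3, 0]
    print(hamilton_cycle(demo_graph))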
| 422
| 0
|
from collections import deque
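# Tarjan's strongly connected components algorithm: a single depth-first search
# assigns each vertex a discovery index and the lowest index reachable from it
# (its "lowlink"); a vertex whose lowlink equals its own index is the root of an
# SCC, which is then popped off the shared stack. Runs in O(V + E).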
def tarjan(g):
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components


def create_graph(n, edges):
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
# Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 712
|
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
snake_case_ = """tiny-wmt19-en-ru"""
# Build
# borrowed from a test
vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
    tokenizer = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=10_00,
tgt_vocab_size=10_00,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 537
| 0
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
lowerCAmelCase_ = pd.read_csv("sample_data.csv", header=None)
lowerCAmelCase_ = df.shape[:1][0]
# If you're using some other dataset input the target column
lowerCAmelCase_ = df.iloc[:, 1:2]
lowerCAmelCase_ = actual_data.values.reshape(len_data, 1)
lowerCAmelCase_ = MinMaxScaler().fit_transform(actual_data)
lowerCAmelCase_ = 1_0
lowerCAmelCase_ = 5
lowerCAmelCase_ = 2_0
lowerCAmelCase_ = len_data - periods * look_back
lowerCAmelCase_ = actual_data[:division]
lowerCAmelCase_ = actual_data[division - look_back :]
lowerCAmelCase_ , lowerCAmelCase_ = [], []
lowerCAmelCase_ , lowerCAmelCase_ = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
lowerCAmelCase_ = np.array(train_x)
lowerCAmelCase_ = np.array(test_x)
lowerCAmelCase_ = np.array([list(i.ravel()) for i in train_y])
lowerCAmelCase_ = np.array([list(i.ravel()) for i in test_y])
lowerCAmelCase_ = Sequential()
model.add(LSTM(1_2_8, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(6_4, input_shape=(1_2_8, 1)))
model.add(Dense(forward_days))
model.compile(loss="mean_squared_error", optimizer="adam")
lowerCAmelCase_ = model.fit(
x_train, y_train, epochs=1_5_0, verbose=1, shuffle=True, batch_size=4
)
lowerCAmelCase_ = model.predict(x_test)
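    # A possible follow-up (illustrative, not in the original script): report the
    # test RMSE in the scaled [0, 1] space to get a single quality number.
    rmse = np.sqrt(np.mean((pred - y_test) ** 2))
    print(f"test RMSE (scaled space): {rmse:.4f}")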
| 326
|
from math import pi, sqrt
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
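# Worked example of the recursion above: gamma(2.5) = 1.5 * gamma(1.5)
# = 1.5 * 0.5 * gamma(0.5) = 0.75 * sqrt(pi) ≈ 1.3293.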
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input('Gamma of: '))
print(F'''gamma({num}) = {gamma(num)}''')
print('\nEnter 0 to exit...')
| 300
| 0
|
"""simple docstring"""
import os
from pathlib import Path
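# Generates a model card (README.md) for each of the four ported FSMT WMT19
# checkpoints, filling in language tags, sample inputs, and BLEU scores.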
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
"en": "Machine learning is great, isn\'t it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
    pair = f'''{src_lang}-{tgt_lang}'''
    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f'''Generating {path}''')
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 714
|
"""simple docstring"""
import os
import sys
lowerCAmelCase_: Any = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowerCAmelCase_: Union[str, Any] = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoConfig.from_pretrained(*A , **A )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(*A , **A )
@add_start_docstrings(AutoModel.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModel.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*A , **A )
| 668
| 0
|
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
lowerCAmelCase__ = logging.get_logger(__name__)
# General docstring
lowerCAmelCase__ = 'PoolFormerConfig'
# Base docstring
lowerCAmelCase__ = 'sail/poolformer_s12'
lowerCAmelCase__ = [1, 512, 7, 7]
# Image classification docstring
lowerCAmelCase__ = 'sail/poolformer_s12'
lowerCAmelCase__ = 'tabby, tabby cat'
lowerCAmelCase__ = [
'sail/poolformer_s12',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
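# Stochastic depth ("drop path"): during training, each sample's residual branch is
# zeroed out with probability `drop_prob`; survivors are divided by the keep
# probability so the expected activation magnitude is unchanged at eval time.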
def drop_path(input, drop_prob=0.0, training=False):
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
class snake_case ( nn.Module ):
def __init__(self , SCREAMING_SNAKE_CASE_ = None ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE_ = drop_prob
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return drop_path(SCREAMING_SNAKE_CASE_ , self.drop_prob , self.training )
def _lowercase (self ):
"""simple docstring"""
return "p={}".format(self.drop_prob )
class snake_case ( nn.Module ):
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE_ = patch_size if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.Iterable ) else (patch_size, patch_size)
SCREAMING_SNAKE_CASE_ = stride if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.Iterable ) else (stride, stride)
SCREAMING_SNAKE_CASE_ = padding if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.Iterable ) else (padding, padding)
SCREAMING_SNAKE_CASE_ = nn.Convad(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = norm_layer(SCREAMING_SNAKE_CASE_ ) if norm_layer else nn.Identity()
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.projection(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.norm(SCREAMING_SNAKE_CASE_ )
return embeddings
class snake_case ( nn.GroupNorm ):
def __init__(self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__(1 , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
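# Token mixing in PoolFormer: plain average pooling stands in for attention, and
# the input is subtracted so the residual branch only carries the pooled difference.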
class snake_case ( nn.Module ):
def __init__(self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE_ = nn.AvgPoolad(SCREAMING_SNAKE_CASE_ , stride=1 , padding=pool_size // 2 , count_include_pad=SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return self.pool(SCREAMING_SNAKE_CASE_ ) - hidden_states
class snake_case ( nn.Module ):
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE_ = nn.Convad(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 )
SCREAMING_SNAKE_CASE_ = nn.Convad(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 )
SCREAMING_SNAKE_CASE_ = PoolFormerDropPath(SCREAMING_SNAKE_CASE_ )
if isinstance(config.hidden_act , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = ACTaFN[config.hidden_act]
else:
SCREAMING_SNAKE_CASE_ = config.hidden_act
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.conva(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.act_fn(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.drop(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.conva(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.drop(SCREAMING_SNAKE_CASE_ )
return hidden_states
class snake_case ( nn.Module ):
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE_ = PoolFormerPooling(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = PoolFormerOutput(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = PoolFormerGroupNorm(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = PoolFormerGroupNorm(SCREAMING_SNAKE_CASE_ )
# Useful for training neural nets
SCREAMING_SNAKE_CASE_ = PoolFormerDropPath(SCREAMING_SNAKE_CASE_ ) if drop_path > 0.0 else nn.Identity()
SCREAMING_SNAKE_CASE_ = config.use_layer_scale
if config.use_layer_scale:
SCREAMING_SNAKE_CASE_ = nn.Parameter(
config.layer_scale_init_value * torch.ones((SCREAMING_SNAKE_CASE_) ) , requires_grad=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = nn.Parameter(
config.layer_scale_init_value * torch.ones((SCREAMING_SNAKE_CASE_) ) , requires_grad=SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if self.use_layer_scale:
SCREAMING_SNAKE_CASE_ = self.pooling(self.before_norm(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
SCREAMING_SNAKE_CASE_ = hidden_states + self.drop_path(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = ()
SCREAMING_SNAKE_CASE_ = self.output(self.after_norm(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
SCREAMING_SNAKE_CASE_ = hidden_states + self.drop_path(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = (output,) + outputs
return outputs
else:
SCREAMING_SNAKE_CASE_ = self.drop_path(self.pooling(self.before_norm(SCREAMING_SNAKE_CASE_ ) ) )
# First residual connection
SCREAMING_SNAKE_CASE_ = pooling_output + hidden_states
SCREAMING_SNAKE_CASE_ = ()
# Second residual connection inside the PoolFormerOutput block
SCREAMING_SNAKE_CASE_ = self.drop_path(self.output(self.after_norm(SCREAMING_SNAKE_CASE_ ) ) )
SCREAMING_SNAKE_CASE_ = hidden_states + layer_output
SCREAMING_SNAKE_CASE_ = (output,) + outputs
return outputs
class snake_case ( nn.Module ):
def __init__(self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE_ = config
# stochastic depth decay rule
SCREAMING_SNAKE_CASE_ = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
SCREAMING_SNAKE_CASE_ = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
SCREAMING_SNAKE_CASE_ = nn.ModuleList(SCREAMING_SNAKE_CASE_ )
# Transformer blocks
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
SCREAMING_SNAKE_CASE_ = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
SCREAMING_SNAKE_CASE_ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = nn.ModuleList(SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = () if output_hidden_states else None
SCREAMING_SNAKE_CASE_ = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = layers
# Get patch embeddings from hidden_states
SCREAMING_SNAKE_CASE_ = embedding_layer(SCREAMING_SNAKE_CASE_ )
# Send the embeddings through the blocks
for _, blk in enumerate(SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = blk(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = layer_outputs[0]
if output_hidden_states:
SCREAMING_SNAKE_CASE_ = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE_ , hidden_states=SCREAMING_SNAKE_CASE_ )
class snake_case ( __lowercase ):
UpperCAmelCase__ = PoolFormerConfig
UpperCAmelCase__ = '''poolformer'''
UpperCAmelCase__ = '''pixel_values'''
UpperCAmelCase__ = True
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE_ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(SCREAMING_SNAKE_CASE_ , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ):
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = value
lowerCAmelCase__ = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCAmelCase__ = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n'
@add_start_docstrings(
'''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , __lowercase , )
class snake_case ( __lowercase ):
def __init__(self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = config
SCREAMING_SNAKE_CASE_ = PoolFormerEncoder(SCREAMING_SNAKE_CASE_ )
# Initialize weights and apply final processing
self.post_init()
def _lowercase (self ):
"""simple docstring"""
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _lowercase (self , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_ = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('''You have to specify pixel_values''' )
SCREAMING_SNAKE_CASE_ = self.encoder(
SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , )
SCREAMING_SNAKE_CASE_ = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE_ , hidden_states=encoder_outputs.hidden_states , )
class snake_case ( nn.Module ):
def __init__(self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE_ = nn.Linear(config.hidden_size , config.hidden_size )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.dense(SCREAMING_SNAKE_CASE_ )
return output
@add_start_docstrings(
'''
PoolFormer Model transformer with an image classification head on top
''' , __lowercase , )
class snake_case ( __lowercase ):
def __init__(self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = config.num_labels
SCREAMING_SNAKE_CASE_ = PoolFormerModel(SCREAMING_SNAKE_CASE_ )
# Final norm
SCREAMING_SNAKE_CASE_ = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
SCREAMING_SNAKE_CASE_ = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _lowercase (self , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ = self.poolformer(
SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , )
SCREAMING_SNAKE_CASE_ = outputs[0]
SCREAMING_SNAKE_CASE_ = self.classifier(self.norm(SCREAMING_SNAKE_CASE_ ).mean([-2, -1] ) )
SCREAMING_SNAKE_CASE_ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
SCREAMING_SNAKE_CASE_ = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
SCREAMING_SNAKE_CASE_ = '''single_label_classification'''
else:
SCREAMING_SNAKE_CASE_ = '''multi_label_classification'''
if self.config.problem_type == "regression":
SCREAMING_SNAKE_CASE_ = MSELoss()
if self.num_labels == 1:
SCREAMING_SNAKE_CASE_ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
SCREAMING_SNAKE_CASE_ = loss_fct(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif self.config.problem_type == "single_label_classification":
SCREAMING_SNAKE_CASE_ = CrossEntropyLoss()
SCREAMING_SNAKE_CASE_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
SCREAMING_SNAKE_CASE_ = BCEWithLogitsLoss()
SCREAMING_SNAKE_CASE_ = loss_fct(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if not return_dict:
SCREAMING_SNAKE_CASE_ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ , hidden_states=outputs.hidden_states )
| 626
|
"""simple docstring"""
def _lowerCamelCase ( __a ):
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 1
while repunit:
SCREAMING_SNAKE_CASE_ = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
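# Worked example: for divisor 7 the repunits are 1, 11, 111, ... and
# R(6) = 111111 = 7 * 15873, so least_divisible_repunit(7) == 6.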
def solution(limit: int = 1_000_000) -> int:
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f'''{solution() = }''')
| 626
| 1
|
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
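# Fine-tunes a pretrained code model (default: microsoft/unixcoder-base-nine) as a
# 7-way sequence classifier on the codeparrot/codecomplex dataset, predicting the
# algorithmic complexity class of Java source snippets.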
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
lowerCamelCase__ = load("accuracy")
def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
class CustomCallback(TrainerCallback):
    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
| 202
|
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase__ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( __lowercase , unittest.TestCase ):
UpperCamelCase_ : List[Any] = XLNetTokenizer
UpperCamelCase_ : Optional[int] = XLNetTokenizerFast
UpperCamelCase_ : Dict = True
UpperCamelCase_ : Dict = True
def A_ ( self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCamelCase = XLNetTokenizer(a , keep_accents=a )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def A_ ( self ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = """<s>"""
_UpperCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a )
def A_ ( self ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<eod>""" )
self.assertEqual(len(a ) , 10_06 )
def A_ ( self ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def A_ ( self ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = XLNetTokenizer(a , keep_accents=a )
_UpperCamelCase = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(a , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , [2_85, 46, 10, 1_70, 3_82] )
_UpperCamelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
_UpperCamelCase = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(a , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def A_ ( self ) -> int:
'''simple docstring'''
_UpperCamelCase = XLNetTokenizer(a , do_lower_case=a )
_UpperCamelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""▁he""", """ll""", """o"""] )
def A_ ( self ) -> Dict:
'''simple docstring'''
_UpperCamelCase = XLNetTokenizer(a , do_lower_case=a )
_UpperCamelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
@slow
def A_ ( self ) -> str:
'''simple docstring'''
_UpperCamelCase = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" )
_UpperCamelCase = tokenizer.encode("""sequence builders""" , add_special_tokens=a )
_UpperCamelCase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=a )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(a )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(a , a )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def A_ ( self ) -> int:
'''simple docstring'''
_UpperCamelCase = {"""input_ids""": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # 
# noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
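# Note on the assertions above (hedged summary): XLNet appends its special
# tokens instead of prepending them, so a single sequence encodes as
# "A <sep> <cls>" (ids [..., 4, 3]) and a pair as "A <sep> B <sep> <cls>",
# which is exactly what the two asserts in the slow test check.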
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Tuple ) -> Tuple:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if (
(cp >= 0x4e_00 and cp <= 0x9f_ff)
or (cp >= 0x34_00 and cp <= 0x4d_bf) #
or (cp >= 0x2_00_00 and cp <= 0x2_a6_df) #
or (cp >= 0x2_a7_00 and cp <= 0x2_b7_3f) #
or (cp >= 0x2_b7_40 and cp <= 0x2_b8_1f) #
or (cp >= 0x2_b8_20 and cp <= 0x2_ce_af) #
or (cp >= 0xf9_00 and cp <= 0xfa_ff)
or (cp >= 0x2_f8_00 and cp <= 0x2_fa_1f) #
): #
return True
return False
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> Tuple:
# classify a word: return 1 only if every character is a CJK character (e.g. '身高' or '神'), else 0 (e.g. '180')
for char in word:
_lowercase = ord(snake_case__ )
if not _is_chinese_char(snake_case__ ):
return 0
return 1
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[str] ) -> str:
_lowercase = set()
for token in tokens:
_lowercase = len(snake_case__ ) > 1 and is_chinese(snake_case__ )
if chinese_word:
word_set.add(snake_case__ )
_lowercase = list(snake_case__ )
return word_list
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[str] , snake_case__ :set() ) -> Dict:
if not chinese_word_set:
return bert_tokens
_lowercase = max([len(snake_case__ ) for w in chinese_word_set] )
_lowercase = bert_tokens
_lowercase , _lowercase = 0, len(snake_case__ )
while start < end:
_lowercase = True
if is_chinese(bert_word[start] ):
_lowercase = min(end - start , snake_case__ )
for i in range(snake_case__ , 1 , -1 ):
_lowercase = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
_lowercase = '##' + bert_word[j]
_lowercase = start + i
_lowercase = False
break
if single_word:
start += 1
return bert_word
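# Illustrative trace of the whole-word marking above (toy example, not from
# the original file): with bert tokens ["我", "爱", "北", "京"] and
# chinese_word_set {"北京"}, the longest match starting at "北" is found and
# the continuation is rewritten as ["我", "爱", "北", "##京"], so both
# characters are later masked together as one whole word.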
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[str] , snake_case__ :LTP , snake_case__ :BertTokenizer ) -> Optional[Any]:
_lowercase = []
for i in range(0 , len(snake_case__ ) , 100 ):
_lowercase = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=['cws'] ).cws
_lowercase = [get_chinese_word(snake_case__ ) for r in res]
ltp_res.extend(snake_case__ )
assert len(snake_case__ ) == len(snake_case__ )
_lowercase = []
for i in range(0 , len(snake_case__ ) , 100 ):
_lowercase = bert_tokenizer(lines[i : i + 100] , add_special_tokens=snake_case__ , truncation=snake_case__ , max_length=512 )
bert_res.extend(res['input_ids'] )
assert len(snake_case__ ) == len(snake_case__ )
_lowercase = []
for input_ids, chinese_word in zip(snake_case__ , snake_case__ ):
_lowercase = []
for id in input_ids:
_lowercase = bert_tokenizer._convert_id_to_token(snake_case__ )
input_tokens.append(snake_case__ )
_lowercase = add_sub_symbol(snake_case__ , snake_case__ )
_lowercase = []
# We only save the positions of Chinese subwords starting with "##", which means they are part of a whole word.
for i, token in enumerate(snake_case__ ):
if token[:2] == "##":
_lowercase = token[2:]
# save chinese tokens' pos
if len(snake_case__ ) == 1 and _is_chinese_char(ord(snake_case__ ) ):
ref_id.append(snake_case__ )
ref_ids.append(snake_case__ )
assert len(snake_case__ ) == len(snake_case__ )
return ref_ids
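# Output shape of prepare_ref above (illustrative): for each input line,
# ref_ids holds the positions of "##"-continuation Chinese sub-tokens, e.g.
# tokens ["[CLS]", "北", "##京", "[SEP]"] give ref positions [2]; whole-word
# masking later uses these positions to mask "北" and "##京" jointly.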
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> Any:
    # For Chinese (Ro)BERT(a), the best results come from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # To fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
_lowercase = f.readlines()
_lowercase = [line.strip() for line in data if len(snake_case__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
_lowercase = LTP(args.ltp ) # faster on a GPU device
_lowercase = BertTokenizer.from_pretrained(args.bert )
_lowercase = prepare_ref(snake_case__ , snake_case__ , snake_case__ )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
_lowercase = [json.dumps(snake_case__ ) + '\n' for ref in ref_ids]
f.writelines(snake_case__ )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
snake_case = parser.parse_args()
main(args)
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
A : List[str] = pytest.mark.integration
A : Optional[Any] = {"comet"}
A : int = importlib.util.find_spec("fairseq") is not None
A : Union[str, Any] = {"code_eval"}
A : Dict = os.name == "nt"
A : Dict = {"bertscore", "frugalscore", "perplexity"}
A : Any = importlib.util.find_spec("transformers") is not None
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
@wraps(_UpperCamelCase )
def wrapper(self , _UpperCamelCase ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("\"test requires Fairseq\"" )
else:
test_case(self , _UpperCamelCase )
return wrapper
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
@wraps(_UpperCamelCase )
def wrapper(self , _UpperCamelCase ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("\"test requires transformers\"" )
else:
test_case(self , _UpperCamelCase )
return wrapper
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
@wraps(_UpperCamelCase )
def wrapper(self , _UpperCamelCase ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("\"test not supported on Windows\"" )
else:
test_case(self , _UpperCamelCase )
return wrapper
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
@local
class _UpperCamelCase ( parameterized.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any ={}
__UpperCAmelCase : List[str] =None
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" )
def snake_case ( self , __a ):
__lowerCAmelCase = "[...]"
__lowerCAmelCase = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , __a ) ).module_path )
__lowerCAmelCase = datasets.load.import_main_class(metric_module.__name__ , dataset=__a )
# check parameters
__lowerCAmelCase = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(__a , metric_module.__name__ ):
with self.use_local_metrics():
try:
__lowerCAmelCase = doctest.testmod(__a , verbose=__a , raise_on_error=__a )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def snake_case ( self , __a ):
__lowerCAmelCase = "[...]"
__lowerCAmelCase = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , __a ) ).module_path )
# run doctest
with self.use_local_metrics():
__lowerCAmelCase = doctest.testmod(__a , verbose=__a , raise_on_error=__a )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def snake_case ( self , __a , __a ):
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](__a ):
yield
else:
yield
@contextmanager
def snake_case ( self ):
def load_local_metric(__a , *__a , **__a ):
return load_metric(os.path.join("metrics" , __a ) , *__a , **__a )
with patch("datasets.load_metric" ) as mock_load_metric:
__lowerCAmelCase = load_local_metric
yield
@classmethod
def snake_case ( cls , __a ):
def wrapper(__a ):
__lowerCAmelCase = contextmanager(__a )
__lowerCAmelCase = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt" )
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("sv" , "" , "" ) # handle pytest cli flags
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def snake_case ( self , __a ):
assert len(input_dict["input_ids"] ) == 2
return np.array([1.0_3, 1.0_4] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("bleurt.score._create_predictor" ) as mock_create_predictor:
__lowerCAmelCase = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore" )
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
import torch
def bert_cos_score_idf(_UpperCamelCase , _UpperCamelCase , *_UpperCamelCase , **_UpperCamelCase ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(_UpperCamelCase ) )
# mock get_model, which would otherwise download a BERT model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("bert_score.scorer.get_model" ), patch(
"bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf:
__lowerCAmelCase = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("comet" )
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
def load_from_checkpoint(_UpperCamelCase ):
class _UpperCamelCase :
'''simple docstring'''
def snake_case ( self , __a , *__a , **__a ):
assert len(__a ) == 2
__lowerCAmelCase = [0.1_9, 0.9_2]
return scores, sum(__a ) / len(__a )
return Model()
# mock download_model and load_from_checkpoint, which would otherwise download a COMET model
with patch("comet.download_model" ) as mock_download_model:
__lowerCAmelCase = None
with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint:
__lowerCAmelCase = load_from_checkpoint
yield
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = load_metric(os.path.join("metrics" , "seqeval" ) )
__lowerCAmelCase = "ERROR"
__lowerCAmelCase = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
with pytest.raises(_UpperCamelCase , match=re.escape(_UpperCamelCase ) ):
metric.compute(predictions=[] , references=[] , scheme=_UpperCamelCase )
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
a_ : int = logging.get_logger(__name__)
a_ : int = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
a_ : int = {
"""vocab_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"""
),
"""squeezebert/squeezebert-mnli""": """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt""",
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"""
),
},
}
a_ : Optional[Any] = {
"""squeezebert/squeezebert-uncased""": 512,
"""squeezebert/squeezebert-mnli""": 512,
"""squeezebert/squeezebert-mnli-headless""": 512,
}
a_ : Any = {
"""squeezebert/squeezebert-uncased""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli-headless""": {"""do_lower_case""": True},
}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = SqueezeBertTokenizer
def __init__( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=True , UpperCamelCase="[UNK]" , UpperCamelCase="[SEP]" , UpperCamelCase="[PAD]" , UpperCamelCase="[CLS]" , UpperCamelCase="[MASK]" , UpperCamelCase=True , UpperCamelCase=None , **UpperCamelCase , ):
"""simple docstring"""
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , )
lowerCamelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , UpperCamelCase_ ) != do_lower_case
or normalizer_state.get("strip_accents" , UpperCamelCase_ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , UpperCamelCase_ ) != tokenize_chinese_chars
):
lowerCamelCase_ = getattr(UpperCamelCase_ , normalizer_state.pop("type" ) )
lowerCamelCase_ = do_lower_case
lowerCamelCase_ = strip_accents
lowerCamelCase_ = tokenize_chinese_chars
lowerCamelCase_ = normalizer_class(**UpperCamelCase_ )
lowerCamelCase_ = do_lower_case
def snake_case ( self , UpperCamelCase , UpperCamelCase=None ):
"""simple docstring"""
lowerCamelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case ( self , UpperCamelCase , UpperCamelCase = None ):
"""simple docstring"""
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case ( self , UpperCamelCase , UpperCamelCase = None ):
"""simple docstring"""
lowerCamelCase_ = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCamelCase , UpperCamelCase=7 , UpperCamelCase=3 , UpperCamelCase=30 , UpperCamelCase=400 , UpperCamelCase=True , UpperCamelCase=None , UpperCamelCase=True , UpperCamelCase=1 / 255 , UpperCamelCase=True , UpperCamelCase=[0.5, 0.5, 0.5] , UpperCamelCase=[0.5, 0.5, 0.5] , UpperCamelCase=True , ):
"""simple docstring"""
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCamelCase_ = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = min_resolution
lowerCamelCase_ = max_resolution
lowerCamelCase_ = do_resize
lowerCamelCase_ = size
lowerCamelCase_ = do_rescale
lowerCamelCase_ = rescale_factor
lowerCamelCase_ = do_normalize
lowerCamelCase_ = image_mean
lowerCamelCase_ = image_std
lowerCamelCase_ = do_pad
def snake_case ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def snake_case ( self , UpperCamelCase , UpperCamelCase=False ):
"""simple docstring"""
if not batched:
lowerCamelCase_ = image_inputs[0]
if isinstance(UpperCamelCase , Image.Image ):
lowerCamelCase_ ,lowerCamelCase_ = image.size
else:
lowerCamelCase_ ,lowerCamelCase_ = image.shape[1], image.shape[2]
if w < h:
lowerCamelCase_ = int(self.size["shortest_edge"] * h / w )
lowerCamelCase_ = self.size["shortest_edge"]
elif w > h:
lowerCamelCase_ = self.size["shortest_edge"]
lowerCamelCase_ = int(self.size["shortest_edge"] * w / h )
else:
lowerCamelCase_ = self.size["shortest_edge"]
lowerCamelCase_ = self.size["shortest_edge"]
else:
lowerCamelCase_ = []
for image in image_inputs:
lowerCamelCase_ ,lowerCamelCase_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCamelCase_ = max(UpperCamelCase , key=lambda UpperCamelCase : item[0] )[0]
lowerCamelCase_ = max(UpperCamelCase , key=lambda UpperCamelCase : item[1] )[1]
return expected_height, expected_width
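# Worked resize example for the shortest-edge logic above (illustrative
# numbers): an image with h=400, w=600 and size {"shortest_edge": 18} takes
# the w > h branch, so expected_height = 18 and
# expected_width = int(18 * 600 / 400) = 27.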
@require_torch
@require_vision
class snake_case ( lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = DetrImageProcessor if is_vision_available() else None
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = DetrImageProcessingTester(self )
@property
def snake_case ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , "image_mean" ) )
self.assertTrue(hasattr(UpperCamelCase , "image_std" ) )
self.assertTrue(hasattr(UpperCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(UpperCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(UpperCamelCase , "rescale_factor" ) )
self.assertTrue(hasattr(UpperCamelCase , "do_resize" ) )
self.assertTrue(hasattr(UpperCamelCase , "size" ) )
self.assertTrue(hasattr(UpperCamelCase , "do_pad" ) )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
lowerCamelCase_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
lowerCamelCase_ = image_processing(UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case ( self ):
"""simple docstring"""
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ = image_processing(UpperCamelCase , return_tensors="pt" ).pixel_values
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case ( self ):
"""simple docstring"""
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ = image_processing(UpperCamelCase , return_tensors="pt" ).pixel_values
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def snake_case ( self ):
"""simple docstring"""
# prepare image and target
lowerCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
lowerCamelCase_ = json.loads(f.read() )
lowerCamelCase_ = {"image_id": 3_9769, "annotations": target}
# encode them
lowerCamelCase_ = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50" )
lowerCamelCase_ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , return_tensors="pt" )
# verify pixel values
lowerCamelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , UpperCamelCase )
lowerCamelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
lowerCamelCase_ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCamelCase ) )
# verify boxes
lowerCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCamelCase )
lowerCamelCase_ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
lowerCamelCase_ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCamelCase ) )
# verify is_crowd
lowerCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCamelCase ) )
# verify class_labels
lowerCamelCase_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCamelCase ) )
# verify orig_size
lowerCamelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCamelCase ) )
# verify size
lowerCamelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCamelCase ) )
@slow
def snake_case ( self ):
"""simple docstring"""
# prepare image, target and masks_path
lowerCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
lowerCamelCase_ = json.loads(f.read() )
lowerCamelCase_ = {"file_name": "000000039769.png", "image_id": 3_9769, "segments_info": target}
lowerCamelCase_ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
lowerCamelCase_ = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic" )
lowerCamelCase_ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , masks_path=UpperCamelCase , return_tensors="pt" )
# verify pixel values
lowerCamelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , UpperCamelCase )
lowerCamelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
lowerCamelCase_ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCamelCase ) )
# verify boxes
lowerCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCamelCase )
lowerCamelCase_ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
lowerCamelCase_ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCamelCase ) )
# verify is_crowd
lowerCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCamelCase ) )
# verify class_labels
lowerCamelCase_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCamelCase ) )
# verify masks
lowerCamelCase_ = 82_2873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , UpperCamelCase )
# verify orig_size
lowerCamelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCamelCase ) )
# verify size
lowerCamelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCamelCase ) )
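# Reading the slow tests above: DetrImageProcessor rescales the 480x640 COCO
# image so its shortest edge becomes 800, giving 800x1066 pixel values, and
# for panoptic inputs it also rasterizes segment masks from masks_path; the
# boxes it checks are normalized (cx, cy, w, h) in [0, 1].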
'''simple docstring'''
def A (__lowerCamelCase :int , __lowerCamelCase :int ):
if a < 0 or b < 0:
raise ValueError("""the value of both inputs must be positive""" )
_lowerCAmelCase = str(bin(__lowerCamelCase ) )[2:] # remove the leading "0b"
_lowerCAmelCase = str(bin(__lowerCamelCase ) )[2:] # remove the leading "0b"
_lowerCAmelCase = max(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(__lowerCamelCase ) , b_binary.zfill(__lowerCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
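# Worked example for the XOR helper above: 25 = 0b11001 and 32 = 0b100000
# zero-fill to six bits each and XOR bitwise to 0b111001, so the function
# returns "0b111001" for inputs (25, 32).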
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_a : List[str] = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[Any] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
_a : str = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
_a : Tuple = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
_a : Dict = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
_a : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
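# Pattern note (hedged): _import_structure is declared up front and the heavy
# torch/tensorflow imports run only under TYPE_CHECKING or on first attribute
# access through _LazyModule, so importing the package stays cheap when those
# backends are absent.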
"""simple docstring"""
from __future__ import annotations
def lowercase_ ( _lowercase : list[int] , _lowercase : int ):
'''simple docstring'''
if len(_lowercase ) < k or k < 0:
raise ValueError("Invalid Input" )
UpperCAmelCase : List[str] = sum(array[:k] )
for i in range(len(_lowercase ) - k ):
UpperCAmelCase : Optional[Any] = current_sum - array[i] + array[i + k]
UpperCAmelCase : Union[str, Any] = max(_lowercase , _lowercase )
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
snake_case_ : Union[str, Any] = [randint(-1_0_0_0, 1_0_0_0) for i in range(1_0_0)]
snake_case_ : int = randint(0, 1_1_0)
print(f'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
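# Deterministic check of the sliding-window recurrence above (illustrative
# values): the size-4 windows over [1, 4, 2, 10, 2, 3, 1, 0, 20] sum to
# 17, 18, 17, 16, 6 and 24, so max_sum_in_array([...], 4) returns 24.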
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
snake_case_ : Union[str, Any] = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def lowercase_ ( _lowercase : List[str] ):
'''simple docstring'''
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )
def lowercase_ ( _lowercase : List[str] ):
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowercase )
def lowercase_ ( _lowercase : Tuple ):
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
UpperCAmelCase : Optional[int] = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(_lowercase , id=_lowercase )
def lowercase_ ( _lowercase : str , _lowercase : Dict ):
'''simple docstring'''
if exitstatus == 5:
UpperCAmelCase : List[str] = 0
# Doctest custom flag to ignore output.
snake_case_ : Union[str, Any] = doctest.register_optionflag("""IGNORE_RESULT""")
snake_case_ : Optional[int] = doctest.OutputChecker
class snake_case__ ( lowerCAmelCase_ ):
def __lowerCAmelCase ( self : List[Any] , lowercase : Tuple , lowercase : Optional[Any] , lowercase : int ):
'''simple docstring'''
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , lowercase , lowercase , lowercase )
snake_case_ : List[str] = CustomOutputChecker
snake_case_ : Optional[Any] = HfDoctestModule
snake_case_ : List[str] = HfDocTestParser
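# Usage sketch for the IGNORE_RESULT flag registered above (the docstring
# below is illustrative, not from this file):
#
#     >>> datetime.now()  # doctest: +IGNORE_RESULT
#     datetime.datetime(2021, 1, 1, 0, 0)
#
# With the flag set, the custom checker accepts any output for that example.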
'''simple docstring'''
import random
from typing import Any
def UpperCAmelCase ( UpperCAmelCase__ : list):
for _ in range(len(UpperCAmelCase__)):
lowerCamelCase : List[Any] = random.randint(0 , len(UpperCAmelCase__) - 1)
lowerCamelCase : Optional[int] = random.randint(0 , len(UpperCAmelCase__) - 1)
lowerCamelCase , lowerCamelCase : Tuple = data[b], data[a]
return data
if __name__ == "__main__":
A = [0, 1, 2, 3, 4, 5, 6, 7]
A = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
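# The loop above swaps two uniformly random positions per pass, which is the
# naive "random swap" shuffle; the canonical Fisher-Yates walks the sequence
# once, swapping index i with a random j <= i, which yields an unbiased
# permutation. A minimal sketch of that variant (in-place, reusing the random
# import at the top of this file):
def fisher_yates_sketch(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        swap_index = random.randint(0, i)
        data[i], data[swap_index] = data[swap_index], data[i]
    return data
# fisher_yates_sketch(list(range(8))) -> an unbiased permutation of 0..7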
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : int):
if not isinstance(lowerCamelCase , lowerCamelCase):
A_ : List[Any] = F'Input value of [number={number}] must be an integer'
raise TypeError(lowerCamelCase)
if number < 1:
A_ : int = F'Input value of [number={number}] must be > 0'
raise ValueError(lowerCamelCase)
A_ : Optional[Any] = 1
for i in range(1 , lowerCamelCase):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
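# Standalone sketch of the recurrence above (assumption: the loop implements
# C_i = C_{i-1} * (4i - 2) // (i + 1), the Catalan numbers, so input n
# yields the (n - 1)-th Catalan number):
def catalan_sketch(n: int) -> int:
    current = 1
    for i in range(1, n):
        current = current * (4 * i - 2) // (i + 1)
    return current
# [catalan_sketch(n) for n in range(1, 6)] -> [1, 1, 2, 5, 14]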
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """deberta-v2"""
def __init__( self : Optional[Any] ,_a : Union[str, Any]=128100 ,_a : Optional[int]=1536 ,_a : Dict=24 ,_a : int=24 ,_a : Tuple=6144 ,_a : Union[str, Any]="gelu" ,_a : List[Any]=0.1 ,_a : Dict=0.1 ,_a : int=512 ,_a : int=0 ,_a : int=0.02 ,_a : int=1e-7 ,_a : List[str]=False ,_a : Union[str, Any]=-1 ,_a : List[Any]=0 ,_a : Optional[Any]=True ,_a : Tuple=None ,_a : Any=0 ,_a : int="gelu" ,**_a : Any ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Union[str, Any] = hidden_size
A_ : Dict = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : List[Any] = intermediate_size
A_ : List[Any] = hidden_act
A_ : Optional[int] = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : int = max_position_embeddings
A_ : Any = type_vocab_size
A_ : List[Any] = initializer_range
A_ : int = relative_attention
A_ : Tuple = max_relative_positions
A_ : int = pad_token_id
A_ : Tuple = position_biased_input
# Backwards compatibility
if type(_a ) == str:
A_ : str = [x.strip() for x in pos_att_type.lower().split("""|""" )]
A_ : Any = pos_att_type
A_ : Optional[int] = vocab_size
A_ : Tuple = layer_norm_eps
A_ : Any = kwargs.get("""pooler_hidden_size""" ,_a )
A_ : Union[str, Any] = pooler_dropout
A_ : List[Any] = pooler_hidden_act
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _a ( self : Any ):
'''simple docstring'''
if self.task == "multiple-choice":
A_ : Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ : Any = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
return 12
def _a ( self : int ,_a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,_a : int = -1 ,_a : int = -1 ,_a : int = -1 ,_a : bool = False ,_a : Optional["TensorType"] = None ,_a : int = 3 ,_a : int = 40 ,_a : int = 40 ,_a : "PreTrainedTokenizerBase" = None ,):
'''simple docstring'''
A_ : Any = super().generate_dummy_inputs(preprocessor=_a ,framework=_a )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
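# Reading the OnnxConfig above: the inputs expose dynamic axes
# {0: "batch", 1: "sequence"} (plus "choice" for multiple-choice tasks),
# token_type_ids are only exported when type_vocab_size > 0, and the
# default ONNX opset returned is 12.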
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[int] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ : List[Any] = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase__ : Tuple = {
"roberta-base": 5_12,
"roberta-large": 5_12,
"roberta-large-mnli": 5_12,
"distilroberta-base": 5_12,
"roberta-base-openai-detector": 5_12,
"roberta-large-openai-detector": 5_12,
}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Dict = VOCAB_FILES_NAMES
snake_case__ :str = PRETRAINED_VOCAB_FILES_MAP
snake_case__ :Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ :Optional[Any] = ['input_ids', 'attention_mask']
snake_case__ :Optional[int] = RobertaTokenizer
def __init__( self : Union[str, Any] , __magic_name__ : Any=None , __magic_name__ : Union[str, Any]=None , __magic_name__ : Tuple=None , __magic_name__ : int="replace" , __magic_name__ : Optional[int]="<s>" , __magic_name__ : List[Any]="</s>" , __magic_name__ : Tuple="</s>" , __magic_name__ : int="<s>" , __magic_name__ : Optional[int]="<unk>" , __magic_name__ : Tuple="<pad>" , __magic_name__ : List[str]="<mask>" , __magic_name__ : Dict=False , __magic_name__ : Union[str, Any]=True , **__magic_name__ : int , ):
"""simple docstring"""
super().__init__(
__magic_name__ , __magic_name__ , tokenizer_file=__magic_name__ , errors=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ , **__magic_name__ , )
lowerCAmelCase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __magic_name__ ) != add_prefix_space:
lowerCAmelCase__ = getattr(__magic_name__ , pre_tok_state.pop("type" ) )
lowerCAmelCase__ = add_prefix_space
lowerCAmelCase__ = pre_tok_class(**__magic_name__ )
lowerCAmelCase__ = add_prefix_space
lowerCAmelCase__ = "post_processor"
lowerCAmelCase__ = getattr(self.backend_tokenizer , __magic_name__ , __magic_name__ )
if tokenizer_component_instance:
lowerCAmelCase__ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the `post_processor_class` object
if "sep" in state:
lowerCAmelCase__ = tuple(state["sep"] )
if "cls" in state:
lowerCAmelCase__ = tuple(state["cls"] )
lowerCAmelCase__ = False
if state.get("add_prefix_space" , __magic_name__ ) != add_prefix_space:
lowerCAmelCase__ = add_prefix_space
lowerCAmelCase__ = True
if state.get("trim_offsets" , __magic_name__ ) != trim_offsets:
lowerCAmelCase__ = trim_offsets
lowerCAmelCase__ = True
if changes_to_apply:
lowerCAmelCase__ = getattr(__magic_name__ , state.pop("type" ) )
lowerCAmelCase__ = component_class(**__magic_name__ )
setattr(self.backend_tokenizer , __magic_name__ , __magic_name__ )
@property
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else value
lowerCAmelCase__ = value
def __SCREAMING_SNAKE_CASE ( self : int , *__magic_name__ : Union[str, Any] , **__magic_name__ : Any ):
"""simple docstring"""
lowerCAmelCase__ = kwargs.get("is_split_into_words" , __magic_name__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Any , *__magic_name__ : Any , **__magic_name__ : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = kwargs.get("is_split_into_words" , __magic_name__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str , __magic_name__ : Optional[str] = None ):
"""simple docstring"""
lowerCAmelCase__ = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ )
return tuple(__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Optional[int] , __magic_name__ : Optional[int]=None ):
"""simple docstring"""
lowerCAmelCase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ):
"""simple docstring"""
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
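# Note on create_token_type_ids_from_sequences above: unlike BERT, RoBERTa
# returns all-zero token_type_ids even for pairs, and a pair is joined with a
# doubled separator, i.e. "<s> A </s></s> B </s>" -> [0] * total_length.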
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : List[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = {"vocab_file": "vocab.json"}
UpperCAmelCase__ : Optional[Any] = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
UpperCAmelCase__ : Union[str, Any] = {"mgp-str": 27}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Any = VOCAB_FILES_NAMES
snake_case__ :Dict = PRETRAINED_VOCAB_FILES_MAP
snake_case__ :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : int="[GO]" , __magic_name__ : Optional[Any]="[GO]" , __magic_name__ : List[str]="[s]" , __magic_name__ : str="[GO]" , **__magic_name__ : List[Any] ):
"""simple docstring"""
super().__init__(
unk_token=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , pad_token=__magic_name__ , **__magic_name__ , )
with open(__magic_name__ , encoding="utf-8" ) as vocab_handle:
lowerCAmelCase__ = json.load(__magic_name__ )
lowerCAmelCase__ = {v: k for k, v in self.vocab.items()}
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
return len(self.vocab )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : Dict ):
"""simple docstring"""
lowerCAmelCase__ = []
for s in text:
char_tokens.extend(s )  # one token per character of the input string
return char_tokens
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str ):
"""simple docstring"""
return self.vocab.get(__magic_name__ , self.vocab.get(self.unk_token ) )
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : Tuple ):
"""simple docstring"""
return self.decoder.get(__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str , __magic_name__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(__magic_name__ ):
logger.error("Vocabulary path ({}) should be a directory".format(__magic_name__ ) )
return
lowerCAmelCase__ = os.path.join(
__magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
with open(__magic_name__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=__magic_name__ , ensure_ascii=__magic_name__ ) + "\n" )
return (vocab_file,)
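# Hedged round-trip sketch of the character-level tokenizer above; the tiny
# vocab is made up for illustration and is not the released mgp-str vocab.json.
_sketch_vocab = {"[GO]": 0, "[s]": 1, "a": 2, "b": 3}
_sketch_decoder = {v: k for k, v in _sketch_vocab.items()}
_sketch_ids = [_sketch_vocab.get(c, _sketch_vocab["[GO]"]) for c in "ab"]
assert _sketch_ids == [2, 3]
assert "".join(_sketch_decoder[i] for i in _sketch_ids) == "ab"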
| 48
| 1
|
"""simple docstring"""
import os
import string
import sys
lowerCamelCase : Any = 1 << 8
lowerCamelCase : Optional[int] = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 2_7,
"""up""": 6_5 + ARROW_KEY_FLAG,
"""down""": 6_6 + ARROW_KEY_FLAG,
"""right""": 6_7 + ARROW_KEY_FLAG,
"""left""": 6_8 + ARROW_KEY_FLAG,
"""mod_int""": 9_1,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 5_0,
"""delete""": 5_1,
"""pg_up""": 5_3,
"""pg_down""": 5_4,
}
KEYMAP["""arrow_begin"""] = KEYMAP["""up"""]  # lowest arrow code, consulted by get_character() below
KEYMAP["""arrow_end"""] = KEYMAP["""left"""]  # highest arrow code
if sys.platform == "win32":
lowerCamelCase : Dict = []
lowerCamelCase : Optional[int] = {
b"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
b"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(1_0):
    KEYMAP[str(i)] = ord(str(i))  # digit keys map to their own character codes
def A__ ( ):
'''simple docstring'''
if os.name == "nt":
import msvcrt
_SCREAMING_SNAKE_CASE = '''mbcs'''
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
        if len(WIN_CH_BUFFER ) == 0:
# Read the keystroke
_SCREAMING_SNAKE_CASE = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
_SCREAMING_SNAKE_CASE = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
_SCREAMING_SNAKE_CASE = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP['''mod_int'''] ) )
WIN_CH_BUFFER.append(UpperCamelCase__ )
if ord(UpperCamelCase__ ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
_SCREAMING_SNAKE_CASE = chr(KEYMAP['''esc'''] )
except KeyError:
_SCREAMING_SNAKE_CASE = cha[1]
else:
_SCREAMING_SNAKE_CASE = ch.decode(UpperCamelCase__ )
else:
_SCREAMING_SNAKE_CASE = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
_SCREAMING_SNAKE_CASE = sys.stdin.fileno()
_SCREAMING_SNAKE_CASE = termios.tcgetattr(UpperCamelCase__ )
try:
tty.setraw(UpperCamelCase__ )
_SCREAMING_SNAKE_CASE = sys.stdin.read(1 )
finally:
termios.tcsetattr(UpperCamelCase__ , termios.TCSADRAIN , UpperCamelCase__ )
return ch
def A__ ( ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(UpperCamelCase__ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(UpperCamelCase__ ) == KEYMAP["esc"]:
_SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(UpperCamelCase__ ) == KEYMAP["mod_int"]:
_SCREAMING_SNAKE_CASE = get_raw_chars()
            if (
                KEYMAP["arrow_begin"] - ARROW_KEY_FLAG
                <= ord(UpperCamelCase__ )
                <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG
            ):
return chr(ord(UpperCamelCase__ ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
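# Arrow keys are reported with ARROW_KEY_FLAG (1 << 8) added, which keeps them
# outside the single-byte range; a small self-contained check of that scheme:
def _is_arrow(code, flag=1 << 8):
    return bool(code & flag)

assert _is_arrow(65 + (1 << 8))      # "up" as defined in KEYMAP above
assert not _is_arrow(ord("a"))       # plain printable characters carry no flag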
| 168
|
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
lowerCamelCase : Any = """facebook/wmt19-en-de"""
lowerCamelCase : int = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
lowerCamelCase : Dict = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
lowerCamelCase : Dict = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
lowerCamelCase : Dict = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
lowerCamelCase : Tuple = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
lowerCamelCase : Optional[int] = """tiny-wmt19-en-de"""
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-de
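# Hedged usage sketch: once uploaded, the tiny checkpoint can be pulled back
# like any other hub model (repo id taken from the comment above):
#
#   tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-de")
#   model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-de")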
| 168
| 1
|
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
# load base model
lowercase__ = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
lowercase__ = load_file(_SCREAMING_SNAKE_CASE )
lowercase__ = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
lowercase__ = key.split('.' )[0].split(LORA_PREFIX_TEXT_ENCODER + '_' )[-1].split('_' )
lowercase__ = pipeline.text_encoder
else:
lowercase__ = key.split('.' )[0].split(LORA_PREFIX_UNET + '_' )[-1].split('_' )
lowercase__ = pipeline.unet
# find the target layer
lowercase__ = layer_infos.pop(0 )
while len(_SCREAMING_SNAKE_CASE ) > -1:
try:
lowercase__ = curr_layer.__getattr__(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
lowercase__ = layer_infos.pop(0 )
elif len(_SCREAMING_SNAKE_CASE ) == 0:
break
except Exception:
if len(_SCREAMING_SNAKE_CASE ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
lowercase__ = layer_infos.pop(0 )
lowercase__ = []
if "lora_down" in key:
pair_keys.append(key.replace('lora_down' , 'lora_up' ) )
pair_keys.append(_SCREAMING_SNAKE_CASE )
else:
pair_keys.append(_SCREAMING_SNAKE_CASE )
pair_keys.append(key.replace('lora_up' , 'lora_down' ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
lowercase__ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
lowercase__ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).unsqueeze(2 ).unsqueeze(3 )
else:
lowercase__ = state_dict[pair_keys[0]].to(torch.floataa )
lowercase__ = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# update visited list
for item in pair_keys:
visited.append(_SCREAMING_SNAKE_CASE )
return pipeline
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
lowercase_ = parser.parse_args()
lowercase_ = args.base_model_path
lowercase_ = args.checkpoint_path
lowercase_ = args.dump_path
lowercase_ = args.lora_prefix_unet
lowercase_ = args.lora_prefix_text_encoder
lowercase_ = args.alpha
lowercase_ = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
lowercase_ = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
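# Minimal check of the key-splitting convention the conversion loop relies on;
# the sample key is the one quoted in the comments above, and "lora_te" is the
# script's default text-encoder prefix.
_sample_key = "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
_infos = _sample_key.split(".")[0].split("lora_te" + "_")[-1].split("_")
assert _infos[:4] == ["text", "model", "encoder", "layers"]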
| 235
|
from __future__ import annotations
def max_sum_in_array(array: list[int] , k: int ) -> int:
    if len(array ) < k or k < 0:
        raise ValueError('Invalid Input' )
    # seed the window with the first k elements, then slide one position at a time
    max_sum = current_sum = sum(array[:k] )
    for i in range(len(array ) - k ):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum , current_sum )
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1_000, 1_000) for i in range(100)]
    k = randint(0, 110)
print(f'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
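# Worked example of the sliding window: for [1, 2, 3, 4] and k = 2 the window
# sums are 3, 5, 7, so the maximum is 7.
assert max_sum_in_array([1, 2, 3, 4], 2) == 7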
| 235
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class A_(SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
a_ : Tuple = """mobilenet_v1"""
def __init__( self , A=3 , A=224 , A=1.0 , A=8 , A="relu6" , A=True , A=0.9_9_9 , A=0.0_2 , A=0.0_0_1 , **A , ):
super().__init__(**A )
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than zero.' )
_lowerCamelCase : Union[str, Any] = num_channels
_lowerCamelCase : Optional[Any] = image_size
_lowerCamelCase : Tuple = depth_multiplier
_lowerCamelCase : Dict = min_depth
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : Optional[int] = tf_padding
_lowerCamelCase : Optional[int] = classifier_dropout_prob
_lowerCamelCase : Any = initializer_range
_lowerCamelCase : str = layer_norm_eps
class A_(SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
a_ : Dict = version.parse("""1.11""" )
@property
def _lowerCAmelCase ( self ):
return OrderedDict([('pixel_values', {0: 'batch'})] )
@property
def _lowerCAmelCase ( self ):
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})] )
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
@property
def _lowerCAmelCase ( self ):
return 1E-4
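# Hedged sketch of what the ONNX description above yields (class names assumed
# to follow the usual transformers pattern; not executed here):
#
#   onnx_config = MobileNetV1OnnxConfig(MobileNetV1Config())
#   onnx_config.inputs                # OrderedDict([("pixel_values", {0: "batch"})])
#   onnx_config.atol_for_validation   # 1e-4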
| 717
|
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
a_ = {"""target_lang""": """fi""", """source_lang""": """en"""}
a_ = """>>zh<<"""
a_ = """Helsinki-NLP/"""
if is_torch_available():
a_ = """pt"""
elif is_tf_available():
a_ = """tf"""
else:
a_ = """jax"""
@require_sentencepiece
class A_(SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
a_ : Optional[int] = MarianTokenizer
a_ : Optional[Any] = False
a_ : Optional[int] = True
def _lowerCAmelCase ( self ):
super().setUp()
_lowerCamelCase : Optional[Any] = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
_lowerCamelCase : Tuple = dict(zip(A , range(len(A ) ) ) )
_lowerCamelCase : Union[str, Any] = Path(self.tmpdirname )
save_json(A , save_dir / VOCAB_FILES_NAMES['vocab'] )
save_json(A , save_dir / VOCAB_FILES_NAMES['tokenizer_config_file'] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(A , save_dir / VOCAB_FILES_NAMES['source_spm'] )
copyfile(A , save_dir / VOCAB_FILES_NAMES['target_spm'] )
_lowerCamelCase : Dict = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCAmelCase ( self , **A ):
return MarianTokenizer.from_pretrained(self.tmpdirname , **A )
def _lowerCAmelCase ( self , A ):
return (
"This is a test",
"This is a test",
)
def _lowerCAmelCase ( self ):
_lowerCamelCase : List[str] = '</s>'
_lowerCamelCase : List[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def _lowerCAmelCase ( self ):
_lowerCamelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '</s>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(A ) , 9 )
def _lowerCAmelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def _lowerCAmelCase ( self ):
_lowerCamelCase : int = MarianTokenizer.from_pretrained(F"{ORG_NAME}opus-mt-en-de" )
_lowerCamelCase : Dict = en_de_tokenizer(['I am a small frog'] , return_tensors=A )
self.assertIsInstance(A , A )
_lowerCamelCase : Optional[int] = [38, 121, 14, 697, 3_8848, 0]
self.assertListEqual(A , batch.input_ids[0] )
_lowerCamelCase : Dict = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(A )
_lowerCamelCase : Tuple = [x.name for x in Path(A ).glob('*' )]
self.assertIn('source.spm' , A )
MarianTokenizer.from_pretrained(A )
def _lowerCAmelCase ( self ):
_lowerCamelCase : Dict = self.get_tokenizer()
_lowerCamelCase : str = tok(
['I am a small frog' * 1000, 'I am a small frog'] , padding=A , truncation=A , return_tensors=A )
self.assertIsInstance(A , A )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def _lowerCAmelCase ( self ):
_lowerCamelCase : Dict = self.get_tokenizer()
_lowerCamelCase : List[Any] = tok(['I am a tiny frog', 'I am a small frog'] , padding=A , return_tensors=A )
self.assertIsInstance(A , A )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def _lowerCAmelCase ( self ):
# fmt: off
_lowerCamelCase : int = {'input_ids': [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name='Helsinki-NLP/opus-mt-en-de' , revision='1a8c2263da11e68e50938f97e10cd57820bd504c' , decode_kwargs={'use_source_tokenizer': True} , )
def _lowerCAmelCase ( self ):
_lowerCamelCase : Any = MarianTokenizer.from_pretrained('hf-internal-testing/test-marian-two-vocabs' )
_lowerCamelCase : List[Any] = 'Tämä on testi'
_lowerCamelCase : Optional[int] = 'This is a test'
_lowerCamelCase : Any = [76, 7, 2047, 2]
_lowerCamelCase : Union[str, Any] = [69, 12, 11, 940, 2]
_lowerCamelCase : List[Any] = tokenizer(A ).input_ids
self.assertListEqual(A , A )
_lowerCamelCase : Optional[int] = tokenizer(text_target=A ).input_ids
self.assertListEqual(A , A )
_lowerCamelCase : Union[str, Any] = tokenizer.decode(A , skip_special_tokens=A )
self.assertEqual(A , A )
| 349
| 0
|
'''simple docstring'''
import argparse
import os
import re
import packaging.version
__UpperCAmelCase ="examples/"
__UpperCAmelCase ={
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
__UpperCAmelCase ={
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
__UpperCAmelCase ="README.md"
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
with open(_a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__lowerCamelCase = f.read()
__lowerCamelCase = REPLACE_PATTERNS[pattern]
__lowerCamelCase = replace.replace('''VERSION''' , _a )
__lowerCamelCase = re_pattern.sub(_a , _a )
with open(_a , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(_a )
def __lowerCAmelCase ( UpperCamelCase__ ) -> List[str]:
for folder, directories, fnames in os.walk(_a ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(_a , _a ) , _a , pattern='''examples''' )
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__=False ) -> Optional[Any]:
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_a , _a , _a )
if not patch:
update_version_in_examples(_a )
def __lowerCAmelCase ( ) -> List[str]:
__lowerCamelCase = "🤗 Transformers currently provides the following architectures"
__lowerCamelCase = "1. Want to contribute a new model?"
with open(_a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__lowerCamelCase = f.readlines()
# Find the start of the list.
__lowerCamelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__lowerCamelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
__lowerCamelCase = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(_a , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(_a )
def __lowerCAmelCase ( ) -> int:
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
__lowerCamelCase = f.read()
__lowerCamelCase = REPLACE_PATTERNS["init"][0].search(_a ).groups()[0]
return packaging.version.parse(_a )
def __lowerCAmelCase ( UpperCamelCase__=False ) -> Optional[Any]:
__lowerCamelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
__lowerCamelCase = default_version.base_version
elif patch:
__lowerCamelCase = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
__lowerCamelCase = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
__lowerCamelCase = input(f"""Which version are you releasing? [{default_version}]""" )
if len(_a ) == 0:
__lowerCamelCase = default_version
print(f"""Updating version to {version}.""" )
global_version_update(_a , patch=_a )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def __lowerCAmelCase ( ) -> Union[str, Any]:
__lowerCamelCase = get_version()
__lowerCamelCase = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
__lowerCamelCase = current_version.base_version
# Check with the user we got that right.
__lowerCamelCase = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(_a ) == 0:
__lowerCamelCase = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(_a )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__UpperCAmelCase =argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
__UpperCAmelCase =parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
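# Self-contained check of the "init" pattern above on a made-up module string:
_init_re = re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE)
_code = '__version__ = "4.1.0.dev0"\n'
assert _init_re.search(_code).groups()[0] == "4.1.0.dev0"
assert _init_re.sub('__version__ = "4.1.0"\n', _code) == '__version__ = "4.1.0"\n'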
| 546
|
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
def lowercase ( _a=None ,_a=None ) -> List[Any]:
return field(default_factory=lambda: default ,metadata=_a )
@dataclass
class UpperCAmelCase__ :
snake_case_ = list_field(
default=[] , metadata={
'''help''': (
'''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'''
''' of all available models'''
)
} , )
snake_case_ = list_field(
default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} )
snake_case_ = list_field(
default=[8, 32, 128, 512] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , )
snake_case_ = field(
default=snake_case__ , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , )
snake_case_ = field(
default=snake_case__ , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , )
snake_case_ = field(
default=snake_case__ , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} )
snake_case_ = field(default=snake_case__ , metadata={'''help''': '''Use FP16 to accelerate inference.'''} )
snake_case_ = field(default=snake_case__ , metadata={'''help''': '''Benchmark training of model'''} )
snake_case_ = field(default=snake_case__ , metadata={'''help''': '''Verbose memory tracing'''} )
snake_case_ = field(
default=snake_case__ , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , )
snake_case_ = field(
default=snake_case__ , metadata={
'''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'''
} , )
snake_case_ = field(default=snake_case__ , metadata={'''help''': '''Trace memory line by line'''} )
snake_case_ = field(default=snake_case__ , metadata={'''help''': '''Save result to a CSV file'''} )
snake_case_ = field(default=snake_case__ , metadata={'''help''': '''Save all print statements in a log file'''} )
snake_case_ = field(default=snake_case__ , metadata={'''help''': '''Whether to print environment information'''} )
snake_case_ = field(
default=snake_case__ , metadata={
'''help''': (
'''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'''
''' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'''
''' for debugging / testing and on TPU.'''
)
} , )
snake_case_ = field(
default=F'inference_time_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , )
snake_case_ = field(
default=F'inference_memory_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , )
snake_case_ = field(
default=F'train_time_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , )
snake_case_ = field(
default=F'train_memory_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , )
snake_case_ = field(
default=F'env_info_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving environment information.'''} , )
snake_case_ = field(
default=F'log_{round(time() )}.csv' , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , )
snake_case_ = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} )
snake_case_ = field(
default=snake_case__ , metadata={
'''help''': (
'''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'''
''' model weights.'''
)
} , )
def snake_case_ ( self ):
"""simple docstring"""
warnings.warn(
F"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
" are deprecated in general and it is advised to use external Benchmarking libraries "
" to benchmark Transformer models." , A__ , )
def snake_case_ ( self ):
"""simple docstring"""
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def snake_case_ ( self ):
"""simple docstring"""
if len(self.models ) <= 0:
raise ValueError(
"Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
" bert-base-cased` or `args.models = ['bert-base-cased']." )
return self.models
@property
def snake_case_ ( self ):
"""simple docstring"""
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("Multiprocessing is currently not possible on TPU." )
return False
else:
return True
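# Hedged usage sketch (field names inferred from the metadata strings above;
# the concrete subclass shipped by the library is e.g. PyTorchBenchmarkArguments):
#
#   args = BenchmarkArguments(models=["bert-base-cased"], batch_sizes=[8],
#                             sequence_lengths=[128])
#   print(args.to_json_string())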
| 137
| 0
|
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = (CMStochasticIterativeScheduler,)
lowerCamelCase_ = 10
def _snake_case ( self :Optional[int] , **__A :List[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = {
"""num_train_timesteps""": 201,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 80.0,
}
config.update(**_SCREAMING_SNAKE_CASE )
return config
def _snake_case ( self :str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 10
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0](**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ = scheduler.timesteps[0]
SCREAMING_SNAKE_CASE__ = scheduler.timesteps[1]
SCREAMING_SNAKE_CASE__ = self.dummy_sample
SCREAMING_SNAKE_CASE__ = 0.1 * sample
SCREAMING_SNAKE_CASE__ = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).prev_sample
SCREAMING_SNAKE_CASE__ = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _snake_case ( self :Dict ) -> Tuple:
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE )
def _snake_case ( self :Any ) -> Optional[Any]:
"""simple docstring"""
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=_SCREAMING_SNAKE_CASE )
def _snake_case ( self :Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ = scheduler_class(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ = 1
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ = scheduler.timesteps
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = self.dummy_model()
SCREAMING_SNAKE_CASE__ = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(_SCREAMING_SNAKE_CASE ):
# 1. scale model input
SCREAMING_SNAKE_CASE__ = scheduler.scale_model_input(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# 2. predict noise residual
SCREAMING_SNAKE_CASE__ = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE__ = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample
SCREAMING_SNAKE_CASE__ = pred_prev_sample
SCREAMING_SNAKE_CASE__ = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 192.7614 ) < 1E-2
assert abs(result_mean.item() - 0.2_5_1_0 ) < 1E-3
def _snake_case ( self :Dict ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ = scheduler_class(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ = [106, 0]
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ = scheduler.timesteps
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = self.dummy_model()
SCREAMING_SNAKE_CASE__ = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
SCREAMING_SNAKE_CASE__ = scheduler.scale_model_input(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# 2. predict noise residual
SCREAMING_SNAKE_CASE__ = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE__ = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample
SCREAMING_SNAKE_CASE__ = pred_prev_sample
SCREAMING_SNAKE_CASE__ = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 347.6357 ) < 1E-2
assert abs(result_mean.item() - 0.4_5_2_7 ) < 1E-3
def _snake_case ( self :str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ = scheduler_class(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ = [39, 30, 12, 15, 0]
with self.assertRaises(_SCREAMING_SNAKE_CASE , msg="""`timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
def _snake_case ( self :Optional[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ = scheduler_class(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ = [39, 30, 12, 1, 0]
SCREAMING_SNAKE_CASE__ = len(_SCREAMING_SNAKE_CASE )
with self.assertRaises(_SCREAMING_SNAKE_CASE , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=_SCREAMING_SNAKE_CASE , timesteps=_SCREAMING_SNAKE_CASE )
def _snake_case ( self :List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ = scheduler_class(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            _SCREAMING_SNAKE_CASE , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
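# The tests above all exercise the same three-call sampling pattern; a hedged
# outline (model stands for any denoising network, g for a torch.Generator):
#
#   for t in scheduler.timesteps:
#       inp = scheduler.scale_model_input(sample, t)
#       residual = model(inp, t)
#       sample = scheduler.step(residual, t, sample, generator=g).prev_sample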
| 713
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'google/pix2struct-textcaps-base': (
'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
),
}
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = "pix2struct_text_model"
lowerCamelCase_ = ["past_key_values"]
lowerCamelCase_ = {
"hidden_size": "hidden_size",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self :Union[str, Any] , __A :Any=5_0244 , __A :Optional[Any]=768 , __A :Tuple=64 , __A :List[str]=2048 , __A :int=12 , __A :str=12 , __A :Any=32 , __A :Tuple=128 , __A :int=0.1 , __A :str=1E-6 , __A :Optional[Any]=1.0 , __A :Union[str, Any]="gelu_new" , __A :Any=0 , __A :List[str]=False , __A :Optional[Any]=0 , __A :int=1 , __A :Optional[int]=False , __A :Optional[Any]=True , **__A :List[Any] , ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = d_kv
SCREAMING_SNAKE_CASE__ = d_ff
SCREAMING_SNAKE_CASE__ = num_layers
SCREAMING_SNAKE_CASE__ = num_heads
SCREAMING_SNAKE_CASE__ = relative_attention_num_buckets
SCREAMING_SNAKE_CASE__ = relative_attention_max_distance
SCREAMING_SNAKE_CASE__ = dropout_rate
SCREAMING_SNAKE_CASE__ = layer_norm_epsilon
SCREAMING_SNAKE_CASE__ = initializer_factor
SCREAMING_SNAKE_CASE__ = use_cache
SCREAMING_SNAKE_CASE__ = eos_token_id
SCREAMING_SNAKE_CASE__ = decoder_start_token_id
# for backwards compatibility
SCREAMING_SNAKE_CASE__ = dense_act_fn
super().__init__(
pad_token_id=__A , eos_token_id=__A , decoder_start_token_id=__A , tie_word_embeddings=__A , is_decoder=__A , **__A , )
@classmethod
def _snake_case ( cls :Optional[int] , __A :Union[str, os.PathLike] , **__A :Optional[int] ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(__A )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = cls.get_config_dict(__A , **__A )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
SCREAMING_SNAKE_CASE__ = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = "pix2struct_vision_model"
def __init__( self :Optional[int] , __A :int=768 , __A :Optional[Any]=768 , __A :Union[str, Any]=2048 , __A :int=64 , __A :Union[str, Any]=12 , __A :str=12 , __A :Any="gelu_new" , __A :List[Any]=1E-6 , __A :Dict=0.0 , __A :int=0.0 , __A :int=1E-10 , __A :Dict=1.0 , __A :int=4096 , __A :int=32 , __A :int=128 , **__A :Tuple , ) -> str:
"""simple docstring"""
super().__init__(**__A )
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = patch_embed_hidden_size
SCREAMING_SNAKE_CASE__ = d_ff
SCREAMING_SNAKE_CASE__ = dropout_rate
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = initializer_factor
SCREAMING_SNAKE_CASE__ = attention_dropout
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = dense_act_fn
SCREAMING_SNAKE_CASE__ = seq_len
SCREAMING_SNAKE_CASE__ = relative_attention_num_buckets
SCREAMING_SNAKE_CASE__ = relative_attention_max_distance
SCREAMING_SNAKE_CASE__ = d_kv
@classmethod
def _snake_case ( cls :str , __A :Union[str, os.PathLike] , **__A :str ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(__A )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = cls.get_config_dict(__A , **__A )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
SCREAMING_SNAKE_CASE__ = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = "pix2struct"
lowerCamelCase_ = True
def __init__( self :str , __A :Optional[Any]=None , __A :List[str]=None , __A :Optional[Any]=1.0 , __A :Optional[Any]=0.0_2 , __A :Any=False , __A :Tuple=False , __A :Any=True , **__A :Dict , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(tie_word_embeddings=__A , is_encoder_decoder=__A , **__A )
if text_config is None:
SCREAMING_SNAKE_CASE__ = {}
logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" )
if vision_config is None:
SCREAMING_SNAKE_CASE__ = {}
logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" )
SCREAMING_SNAKE_CASE__ = PixaStructTextConfig(**__A )
SCREAMING_SNAKE_CASE__ = PixaStructVisionConfig(**__A )
SCREAMING_SNAKE_CASE__ = self.text_config.decoder_start_token_id
SCREAMING_SNAKE_CASE__ = self.text_config.pad_token_id
SCREAMING_SNAKE_CASE__ = self.text_config.eos_token_id
SCREAMING_SNAKE_CASE__ = initializer_factor
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = self.initializer_range
SCREAMING_SNAKE_CASE__ = self.initializer_range
SCREAMING_SNAKE_CASE__ = is_vqa
@classmethod
def _snake_case ( cls :Union[str, Any] , __A :PixaStructTextConfig , __A :PixaStructVisionConfig , **__A :Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__A )
def _snake_case ( self :str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE__ = self.text_config.to_dict()
SCREAMING_SNAKE_CASE__ = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE__ = self.__class__.model_type
return output
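# Hedged sketch of composing the three configs above (class names assumed to be
# the usual Pix2Struct* spellings rather than the mangled ones in this file):
#
#   text_cfg = Pix2StructTextConfig(hidden_size=768)
#   vision_cfg = Pix2StructVisionConfig(hidden_size=768)
#   cfg = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)
#   cfg.to_dict()["text_config"]["hidden_size"]   # 768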
| 59
| 0
|
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
a_ = TypeVar("""KT""")
a_ = TypeVar("""VT""")
class UpperCAmelCase__ ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self: Optional[Any] , __lowerCAmelCase: KT | str = "root" , __lowerCAmelCase: VT | None = None ) -> int:
'''simple docstring'''
__UpperCAmelCase = key
__UpperCAmelCase = value
__UpperCAmelCase = []
def __repr__( self: Dict ) -> str:
'''simple docstring'''
return F'''Node({self.key}: {self.value})'''
@property
def _UpperCAmelCase ( self: Dict ) -> int:
'''simple docstring'''
return len(self.forward )
class UpperCAmelCase__ ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self: Optional[Any] , __lowerCAmelCase: float = 0.5 , __lowerCAmelCase: int = 16 ) -> int:
'''simple docstring'''
__UpperCAmelCase = Node[KT, VT]()
__UpperCAmelCase = 0
__UpperCAmelCase = p
__UpperCAmelCase = max_level
def __str__( self: int ) -> str:
'''simple docstring'''
__UpperCAmelCase = list(self )
if len(__lowerCAmelCase ) == 0:
return F'''SkipList(level={self.level})'''
__UpperCAmelCase = max((len(str(__lowerCAmelCase ) ) for item in items) , default=4 )
__UpperCAmelCase = max(__lowerCAmelCase , 4 ) + 4
__UpperCAmelCase = self.head
__UpperCAmelCase = []
__UpperCAmelCase = node.forward.copy()
lines.append(F'''[{node.key}]'''.ljust(__lowerCAmelCase , "-" ) + "* " * len(__lowerCAmelCase ) )
lines.append(" " * label_size + "| " * len(__lowerCAmelCase ) )
while len(node.forward ) != 0:
__UpperCAmelCase = node.forward[0]
lines.append(
F'''[{node.key}]'''.ljust(__lowerCAmelCase , "-" )
+ " ".join(str(n.key ) if n.key == node.key else "|" for n in forwards ) )
lines.append(" " * label_size + "| " * len(__lowerCAmelCase ) )
__UpperCAmelCase = node.forward
lines.append("None".ljust(__lowerCAmelCase ) + "* " * len(__lowerCAmelCase ) )
return F'''SkipList(level={self.level})\n''' + "\n".join(__lowerCAmelCase )
def __iter__( self: Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
__UpperCAmelCase = node.forward[0]
def _UpperCAmelCase ( self: List[str] ) -> int:
'''simple docstring'''
__UpperCAmelCase = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def _UpperCAmelCase ( self: Dict , __lowerCAmelCase: List[str] ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
'''simple docstring'''
__UpperCAmelCase = []
__UpperCAmelCase = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
__UpperCAmelCase = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(__lowerCAmelCase )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def _UpperCAmelCase ( self: Optional[Any] , __lowerCAmelCase: KT ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase = self._locate_node(__lowerCAmelCase )
if node is not None:
for i, update_node in enumerate(__lowerCAmelCase ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
__UpperCAmelCase = node.forward[i]
else:
__UpperCAmelCase = update_node.forward[:i]
def _UpperCAmelCase ( self: Union[str, Any] , __lowerCAmelCase: KT , __lowerCAmelCase: VT ) -> Any:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase = self._locate_node(__lowerCAmelCase )
if node is not None:
__UpperCAmelCase = value
else:
__UpperCAmelCase = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , __lowerCAmelCase ):
update_vector.append(self.head )
__UpperCAmelCase = level
__UpperCAmelCase = Node(__lowerCAmelCase , __lowerCAmelCase )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(__lowerCAmelCase )
else:
__UpperCAmelCase = new_node
def _UpperCAmelCase ( self: Optional[Any] , __lowerCAmelCase: VT ) -> VT | None:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase = self._locate_node(__lowerCAmelCase )
if node is not None:
return node.value
return None
def __lowerCAmelCase ( ) -> Optional[Any]:
__UpperCAmelCase = SkipList()
skip_list.insert("Key1" , 3 )
skip_list.insert("Key2" , 12 )
skip_list.insert("Key3" , 41 )
skip_list.insert("Key4" , -19 )
__UpperCAmelCase = skip_list.head
__UpperCAmelCase = {}
while node.level != 0:
__UpperCAmelCase = node.forward[0]
__UpperCAmelCase = node.value
assert len(A_ ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def __lowerCAmelCase ( ) -> Optional[int]:
__UpperCAmelCase = SkipList()
skip_list.insert("Key1" , 10 )
skip_list.insert("Key1" , 12 )
skip_list.insert("Key5" , 7 )
skip_list.insert("Key7" , 10 )
skip_list.insert("Key10" , 5 )
skip_list.insert("Key7" , 7 )
skip_list.insert("Key5" , 5 )
skip_list.insert("Key10" , 10 )
__UpperCAmelCase = skip_list.head
__UpperCAmelCase = {}
while node.level != 0:
__UpperCAmelCase = node.forward[0]
__UpperCAmelCase = node.value
if len(A_ ) != 4:
print()
assert len(A_ ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def __lowerCAmelCase ( ) -> str:
__UpperCAmelCase = SkipList()
assert skip_list.find("Some key" ) is None
def __lowerCAmelCase ( ) -> List[Any]:
__UpperCAmelCase = SkipList()
skip_list.insert("Key2" , 20 )
assert skip_list.find("Key2" ) == 20
skip_list.insert("Some Key" , 10 )
skip_list.insert("Key2" , 8 )
skip_list.insert("V" , 13 )
assert skip_list.find("Y" ) is None
assert skip_list.find("Key2" ) == 8
assert skip_list.find("Some Key" ) == 10
assert skip_list.find("V" ) == 13
def __lowerCAmelCase ( ) -> Optional[Any]:
__UpperCAmelCase = SkipList()
skip_list.delete("Some key" )
assert len(skip_list.head.forward ) == 0
def __lowerCAmelCase ( ) -> Optional[Any]:
__UpperCAmelCase = SkipList()
skip_list.insert("Key1" , 12 )
skip_list.insert("V" , 13 )
skip_list.insert("X" , 14 )
skip_list.insert("Key2" , 15 )
skip_list.delete("V" )
skip_list.delete("Key2" )
assert skip_list.find("V" ) is None
assert skip_list.find("Key2" ) is None
def __lowerCAmelCase ( ) -> List[Any]:
__UpperCAmelCase = SkipList()
skip_list.insert("Key1" , 12 )
skip_list.insert("V" , 13 )
skip_list.insert("X" , 14 )
skip_list.insert("Key2" , 15 )
skip_list.delete("V" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) == 14
assert skip_list.find("Key1" ) == 12
assert skip_list.find("Key2" ) == 15
skip_list.delete("X" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) == 12
assert skip_list.find("Key2" ) == 15
skip_list.delete("Key1" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) is None
assert skip_list.find("Key2" ) == 15
skip_list.delete("Key2" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) is None
assert skip_list.find("Key2" ) is None
def __lowerCAmelCase ( ) -> Dict:
__UpperCAmelCase = SkipList()
skip_list.insert("Key1" , 12 )
skip_list.insert("V" , 13 )
skip_list.insert("X" , 1_42 )
skip_list.insert("Key2" , 15 )
skip_list.delete("X" )
def traverse_keys(A_ : List[Any] ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(A_ )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def __lowerCAmelCase ( ) -> str:
def is_sorted(A_ : int ):
return all(next_item >= item for item, next_item in zip(A_ , lst[1:] ) )
__UpperCAmelCase = SkipList()
for i in range(10 ):
skip_list.insert(A_ , A_ )
assert is_sorted(list(A_ ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(A_ ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
assert is_sorted(list(A_ ) )
def __lowerCAmelCase ( ) -> int:
for _ in range(1_00 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def __lowerCAmelCase ( ) -> Union[str, Any]:
__UpperCAmelCase = SkipList()
skip_list.insert(2 , "2" )
skip_list.insert(4 , "4" )
skip_list.insert(6 , "4" )
skip_list.insert(4 , "5" )
skip_list.insert(8 , "4" )
skip_list.insert(9 , "4" )
skip_list.delete(4 )
print(A_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
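# The geometric coin-flip in random_level() above gives a node level L with
# probability p**(L - 1) * (1 - p), so the expected pointer count per node is
# 1 / (1 - p) = 2 for p = 0.5; a quick empirical check of that claim:
def _avg_level(p=0.5, max_level=16, trials=10_000):
    total = 0
    for _ in range(trials):
        level = 1
        while random() < p and level < max_level:
            level += 1
        total += level
    return total / trials

# _avg_level() is close to 2.0 with high probability (left uncalled so the
# module's doctest run stays deterministic).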
| 221
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
a_ = logging.get_logger(__name__)
a_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
a_ = {
"""vocab_file""": {
"""allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"""
},
"""merges_file""": {
"""allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"""
},
}
a_ = {"""allegro/herbert-base-cased""": 514}
a_ = {}
class UpperCAmelCase__ ( snake_case ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = VOCAB_FILES_NAMES
lowerCAmelCase__ : int = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ : Tuple = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ : Tuple = HerbertTokenizer
def __init__( self: Optional[Any] , __lowerCAmelCase: List[str]=None , __lowerCAmelCase: Optional[int]=None , __lowerCAmelCase: List[str]=None , __lowerCAmelCase: str="<s>" , __lowerCAmelCase: List[str]="<unk>" , __lowerCAmelCase: Optional[int]="<pad>" , __lowerCAmelCase: Optional[Any]="<mask>" , __lowerCAmelCase: Union[str, Any]="</s>" , **__lowerCAmelCase: List[Any] , ) -> Tuple:
'''simple docstring'''
super().__init__(
__lowerCAmelCase , __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , cls_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , **__lowerCAmelCase , )
def _UpperCAmelCase ( self: Tuple , __lowerCAmelCase: List[int] , __lowerCAmelCase: Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
__UpperCAmelCase = [self.cls_token_id]
__UpperCAmelCase = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _UpperCAmelCase ( self: Optional[int] , __lowerCAmelCase: List[int] , __lowerCAmelCase: Optional[List[int]] = None , __lowerCAmelCase: bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCAmelCase )) + [1]
return [1] + ([0] * len(__lowerCAmelCase )) + [1] + ([0] * len(__lowerCAmelCase )) + [1]
def _UpperCAmelCase ( self: int , __lowerCAmelCase: List[int] , __lowerCAmelCase: Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCAmelCase ( self: Dict , __lowerCAmelCase: str , __lowerCAmelCase: Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
__UpperCAmelCase = self._tokenizer.model.save(__lowerCAmelCase , name=__lowerCAmelCase )
return tuple(__lowerCAmelCase )
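# Segment-id sketch: unlike the all-zero variant earlier in this document,
# HerBERT marks the second sequence with 1s. Ids below are illustrative only.
_cls, _sep, _a, _b = [0], [2], [7, 8], [9]
assert len(_cls + _a + _sep) * [0] + len(_b + _sep) * [1] == [0, 0, 0, 0, 1, 1]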
| 221
| 1
|
"""simple docstring"""
import os
import platform
import sys
os.environ['''TF_CPP_MIN_LOG_LEVEL'''] = '''3'''  # assumption: this silences TensorFlow C++ logging, as in similar env-dump scripts
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 719
|
"""simple docstring"""
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = 0
UpperCAmelCase = len(lowerCAmelCase )
for i in range(n - 1 ):
for j in range(i + 1 , lowerCAmelCase ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
if len(lowerCAmelCase ) <= 1:
return arr, 0
UpperCAmelCase = len(lowerCAmelCase ) // 2
UpperCAmelCase = arr[0:mid]
UpperCAmelCase = arr[mid:]
UpperCAmelCase , UpperCAmelCase = count_inversions_recursive(lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase = count_inversions_recursive(lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase = _count_cross_inversions(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = []
UpperCAmelCase = UpperCAmelCase = UpperCAmelCase = 0
while i < len(lowerCAmelCase ) and j < len(lowerCAmelCase ):
if p[i] > q[j]:
# if P[i] > Q[j], then P[k] > Q[j] for all i <= k < len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(lowerCAmelCase ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(lowerCAmelCase ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
UpperCAmelCase = count_inversions_bf(lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase = count_inversions_recursive(lowerCAmelCase )
assert num_inversions_bf == num_inversions_recursive == 8
print("""number of inversions = """ , lowerCAmelCase )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
UpperCAmelCase = count_inversions_bf(lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase = count_inversions_recursive(lowerCAmelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ , lowerCAmelCase )
# an empty list should also have zero inversions
UpperCAmelCase = []
UpperCAmelCase = count_inversions_bf(lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase = count_inversions_recursive(lowerCAmelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ , lowerCAmelCase )
if __name__ == "__main__":
main()
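# Quick cross-check (a sketch; uses only the functions defined above). The
# divide-and-conquer counter also returns the sorted array, so it doubles as a
# merge sort:
if __name__ == "__main__":
    sorted_arr, inversions = count_inversions_recursive([3, 1, 2])
    assert sorted_arr == [1, 2, 3] and inversions == 2  # pairs (3, 1) and (3, 2)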
| 378
| 0
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 488
|
"""Approximate the square root of a number via Newton's method on f(x) = x^2 - a."""
import math


def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0  # any point above sqrt(a) works; reach one by repeated squaring
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001
) -> float:
    """Iterate x_{n+1} = x_n - f(x_n) / f'(x_n) until successive values converge."""
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
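# Example run (a sketch; relies only on square_root_iterative defined above):
if __name__ == "__main__":
    for n in (2, 4, 9):
        print(n, "->", square_root_iterative(n))  # 4 -> 2.0, 9 -> 3.0 up to tolerance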
| 672
| 0
|
from pathlib import Path
import json
import tempfile

from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES


mname_tiny = "tiny-wmt19-en-ru"

# Build
# borrowed from a test
vocab = [
    "l",
    "o",
    "w",
    "e",
    "r",
    "s",
    "t",
    "i",
    "d",
    "n",
    "w</w>",
    "r</w>",
    "t</w>",
    "lo",
    "low",
    "er</w>",
    "low</w>",
    "lowest</w>",
    "newer</w>",
    "wider</w>",
    "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 717
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 4
| 0
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exists with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 431
|
import collections
import gzip
import os
import urllib

import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated


_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet.

        `dtype` can be `uint8` to leave the input as `[0, 255]`, or `float32`
        to rescale into `[0, 1]`. The seed argument provides deterministic testing.
        """
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download `filename` from `source_url` into `work_directory`, unless it is already there."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath


@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = f"Validation size should be between 0 and {len(train_images)}. Received: {validation_size}."
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
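# Usage sketch (downloads MNIST on first call; "mnist_data" is a placeholder dir):
#
#     data = read_data_sets("mnist_data", one_hot=True)
#     images, labels = data.train.next_batch(64)  # images: (64, 784) float32 in [0, 1]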
| 431
| 1
|
import functools
from typing import Any


def word_break(string: str, words: list[str]) -> bool:
    """Return True if `string` can be segmented into a sequence of words from `words`."""
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]

        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
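# Example calls (a sketch; exercises word_break defined above):
if __name__ == "__main__":
    print(word_break("applepenapple", ["apple", "pen"]))  # True
    print(word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]))  # False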
| 548
|
from typing import Any


def mode(input_list: list) -> list[Any]:
    """Return the mode(s) (most frequent values) of `input_list`, sorted."""
    if not input_list:
        return []
    counts = [input_list.count(value) for value in input_list]
    max_count = max(counts)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(counts) if value == max_count})


if __name__ == "__main__":
    import doctest

    doctest.testmod()
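# Example (a sketch; uses the mode function defined above):
if __name__ == "__main__":
    print(mode([2, 3, 4, 5, 3, 4]))  # [3, 4]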
| 548
| 1
|
import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging


logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate preceded by a linear warmup over `num_warmup_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise constant lr multipliers parsed from rules like "1:10,0.1:20,0.01"."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay to 0 at `num_training_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay to 0."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay with several hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup followed by a polynomial decay from the initial lr to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified helper that builds any of the schedules above from its name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
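# Usage sketch (assumes torch is installed; the tiny parameter/optimizer below is
# only for illustration):
if __name__ == "__main__":
    import torch

    params = [torch.nn.Parameter(torch.zeros(1))]
    optim = torch.optim.AdamW(params, lr=1e-3)
    sched = get_scheduler("cosine", optim, num_warmup_steps=10, num_training_steps=100)
    for _ in range(5):
        optim.step()
        sched.step()
    print(sched.get_last_lr())  # lr ramping up during warmup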
| 406
|
"""simple docstring"""
import socket
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
UpperCAmelCase = socket.gethostname()
UpperCAmelCase = 12312
sock.connect((host, port) )
sock.send(b"""Hello server!""" )
with open("""Received_file""" , """wb""" ) as out_file:
print("""File opened""" )
print("""Receiving data...""" )
while True:
UpperCAmelCase = sock.recv(1024 )
if not data:
break
out_file.write(lowerCAmelCase )
print("""Successfully received the file""" )
sock.close()
print("""Connection closed""" )
if __name__ == "__main__":
main()
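# The client above assumes a sender is already listening on <hostname>:12312.
# A minimal counterpart server, as a sketch ("some_file" is a placeholder path):
#
#     import socket
#     server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     server.bind((socket.gethostname(), 12312))
#     server.listen(1)
#     conn, _ = server.accept()
#     conn.recv(1024)  # consume the client's greeting
#     with open("some_file", "rb") as f:
#         conn.sendfile(f)
#     conn.close()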
| 673
| 0
|
"""simple docstring"""
from graphs.minimum_spanning_tree_kruskal import kruskal
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase = 9
UpperCAmelCase = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
UpperCAmelCase = kruskal(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
assert sorted(lowerCAmelCase ) == sorted(lowerCAmelCase )
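# Sanity note (a sketch): the expected MST above has exactly num_nodes - 1 = 8
# edges and total weight 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37.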
| 378
|
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
    is_apex_available,
    is_bs4_available,
    is_coloredlogs_available,
    is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 378
| 1
|
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}


if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 629
|
import os


def solution() -> int:
    """Project Euler 11: greatest product of four adjacent numbers in the 20x20 grid."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

        maximum = 0

        # right
        for i in range(20):
            for j in range(17):
                temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
                if temp > maximum:
                    maximum = temp

        # down
        for i in range(17):
            for j in range(20):
                temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
                if temp > maximum:
                    maximum = temp

        # diagonal 1
        for i in range(17):
            for j in range(17):
                temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
                if temp > maximum:
                    maximum = temp

        # diagonal 2
        for i in range(17):
            for j in range(3, 20):
                temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
                if temp > maximum:
                    maximum = temp
        return maximum


if __name__ == "__main__":
    print(solution())
| 629
| 1
|
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union

import transformers
from transformers.testing_utils import require_tf, require_torch, slow


logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Run doctests over all matching files in the given directory."""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, List):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(transformers_directory, identifier=identifier)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(transformers_directory, identifier=identifier)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 701
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 38
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 60
|
import sys
from typing import Tuple

import numpy as np
import torch
from PIL import Image
from torch import nn

from transformers.image_utils import PILImageResampling
from utils import img_tensorize


class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list[min, max]): range to sample the target short edge from.
            max_size (int): maximum allowed longest edge length.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx


def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
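# Usage sketch for the box helpers above (values are illustrative):
#
#     boxes = torch.tensor([[10.0, 20.0, 300.0, 400.0]])
#     boxes = _scale_box(boxes, torch.tensor([[0.5, 0.5]]))  # x coords use scale_yx[:, 1], y coords scale_yx[:, 0]
#     _clip_box(boxes, (200, 200))  # clamps in place to a 200x200 image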
| 483
| 0
|
"""simple docstring"""
from __future__ import annotations
def lowerCAmelCase (__UpperCamelCase : list , __UpperCamelCase : int | None = None , __UpperCamelCase : int | None = None ):
"""simple docstring"""
if start is None:
__UpperCamelCase =0
if end is None:
__UpperCamelCase =len(__UpperCamelCase ) - 1
if start >= end:
return
__UpperCamelCase =(start + end) // 2
slowsort(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
slowsort(__UpperCamelCase , mid + 1 , __UpperCamelCase )
if sequence[end] < sequence[mid]:
__UpperCamelCase , __UpperCamelCase =sequence[mid], sequence[end]
slowsort(__UpperCamelCase , __UpperCamelCase , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
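# Example (a sketch; slowsort mutates the list in place):
if __name__ == "__main__":
    data = [5, 3, 8, 1]
    slowsort(data)
    print(data)  # [1, 3, 5, 8]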
| 296
|
"""simple docstring"""
import qiskit
def lowerCAmelCase (__UpperCamelCase : int , __UpperCamelCase : int ):
"""simple docstring"""
__UpperCamelCase =qiskit.Aer.get_backend('''aer_simulator''' )
__UpperCamelCase =qiskit.QuantumCircuit(4 , 2 )
# encode inputs in qubits 0 and 1
if bita == 1:
qc_ha.x(0 )
if bita == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
__UpperCamelCase =qiskit.execute(__UpperCamelCase , __UpperCamelCase , shots=1_0_0_0 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(__UpperCamelCase )
if __name__ == "__main__":
__lowercase = half_adder(1, 1)
print(f'''Half Adder Output Qubit Counts: {counts}''')
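# Ideal (noiseless) outcomes for the circuit above, as a sketch; the two
# classical bits read "<AND><XOR>" in qiskit's bit ordering:
#   half_adder(0, 0) -> {"00": 1000}
#   half_adder(0, 1) -> {"01": 1000}
#   half_adder(1, 0) -> {"01": 1000}
#   half_adder(1, 1) -> {"10": 1000}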
| 296
| 1
|
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_snake_case = VQModel
_snake_case = 'sample'
@property
def A__ ( self , snake_case_=(32, 32) ) -> Dict:
__lowerCAmelCase = 4
__lowerCAmelCase = 3
__lowerCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__SCREAMING_SNAKE_CASE )
return {"sample": image}
@property
def A__ ( self ) -> Tuple:
return (3, 32, 32)
@property
def A__ ( self ) -> Tuple:
return (3, 32, 32)
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 3,
}
__lowerCAmelCase = self.dummy_input
return init_dict, inputs_dict
def A__ ( self ) -> Optional[int]:
pass
def A__ ( self ) -> Tuple:
pass
def A__ ( self ) -> List[str]:
__lowerCAmelCase , __lowerCAmelCase = VQModel.from_pretrained("""fusing/vqgan-dummy""" , output_loading_info=__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def A__ ( self ) -> List[Any]:
__lowerCAmelCase = VQModel.from_pretrained("""fusing/vqgan-dummy""" )
model.to(__SCREAMING_SNAKE_CASE ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
__lowerCAmelCase = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
__lowerCAmelCase = image.to(__SCREAMING_SNAKE_CASE )
with torch.no_grad():
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE ).sample
__lowerCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
__lowerCAmelCase = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143] )
# fmt: on
self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
| 465
|
"""simple docstring"""
def __magic_name__ ( UpperCamelCase : int , UpperCamelCase : list[int] , UpperCamelCase : int ) -> int:
def count_of_possible_combinations(UpperCamelCase : int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(UpperCamelCase )
def __magic_name__ ( UpperCamelCase : int , UpperCamelCase : list[int] , UpperCamelCase : int ) -> int:
def count_of_possible_combinations_with_dp_array(
UpperCamelCase : int , UpperCamelCase : list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
a__ = sum(
count_of_possible_combinations_with_dp_array(target - item , UpperCamelCase )
for item in array )
a__ = answer
return answer
a__ = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(UpperCamelCase , UpperCamelCase )
def __magic_name__ ( UpperCamelCase : int , UpperCamelCase : list[int] , UpperCamelCase : int ) -> int:
a__ = [0] * (target + 1)
a__ = 1
for i in range(1 , target + 1 ):
for j in range(UpperCamelCase ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
a : Optional[Any] = 3
a : List[str] = 5
a : Dict = [1, 2, 5]
print(combination_sum_iv(n, array, target))
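# The three variants agree (a quick sketch using the values above; there are 9
# compositions of 5 from parts {1, 2, 5}):
if __name__ == "__main__":
    assert (
        combination_sum_iv(n, array, target)
        == combination_sum_iv_dp_array(n, array, target)
        == combination_sum_iv_bottom_up(n, array, target)
        == 9
    )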
| 273
| 0
|
"""simple docstring"""
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
__lowerCamelCase :str = collections.namedtuple('_Datasets', ['train', 'validation', 'test'])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
__lowerCamelCase :int = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
def snake_case ( UpperCamelCase__ : int ) -> Any:
lowerCamelCase : Dict = numpy.dtype(numpy.uintaa ).newbyteorder(""">""" )
return numpy.frombuffer(bytestream.read(4 ) , dtype=UpperCamelCase__ )[0]
@deprecated(UpperCamelCase__ , """Please use tf.data to implement this functionality.""" )
def snake_case ( UpperCamelCase__ : List[str] ) -> Any:
print("""Extracting""" , f.name )
with gzip.GzipFile(fileobj=UpperCamelCase__ ) as bytestream:
lowerCamelCase : Optional[Any] = _readaa(UpperCamelCase__ )
if magic != 2051:
raise ValueError(
"""Invalid magic number %d in MNIST image file: %s""" % (magic, f.name) )
lowerCamelCase : List[Any] = _readaa(UpperCamelCase__ )
lowerCamelCase : int = _readaa(UpperCamelCase__ )
lowerCamelCase : Optional[Any] = _readaa(UpperCamelCase__ )
lowerCamelCase : Dict = bytestream.read(rows * cols * num_images )
lowerCamelCase : Dict = numpy.frombuffer(UpperCamelCase__ , dtype=numpy.uinta )
lowerCamelCase : Optional[int] = data.reshape(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , 1 )
return data
@deprecated(UpperCamelCase__ , """Please use tf.one_hot on tensors.""" )
def snake_case ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] ) -> Union[str, Any]:
lowerCamelCase : List[Any] = labels_dense.shape[0]
lowerCamelCase : Union[str, Any] = numpy.arange(UpperCamelCase__ ) * num_classes
lowerCamelCase : List[Any] = numpy.zeros((num_labels, num_classes) )
lowerCamelCase : Tuple = 1
return labels_one_hot
@deprecated(UpperCamelCase__ , """Please use tf.data to implement this functionality.""" )
def snake_case ( UpperCamelCase__ : Any , UpperCamelCase__ : Dict=False , UpperCamelCase__ : List[str]=10 ) -> str:
print("""Extracting""" , f.name )
with gzip.GzipFile(fileobj=UpperCamelCase__ ) as bytestream:
lowerCamelCase : Dict = _readaa(UpperCamelCase__ )
if magic != 2049:
raise ValueError(
"""Invalid magic number %d in MNIST label file: %s""" % (magic, f.name) )
lowerCamelCase : str = _readaa(UpperCamelCase__ )
lowerCamelCase : Any = bytestream.read(UpperCamelCase__ )
lowerCamelCase : Tuple = numpy.frombuffer(UpperCamelCase__ , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(UpperCamelCase__ , UpperCamelCase__ )
return labels
class A__ :
"""simple docstring"""
@deprecated(
__a , """Please use alternatives such as official/mnist/_DataSet.py"""
""" from tensorflow/models.""" , )
def __init__( self: Tuple , __a: Tuple , __a: Dict , __a: Tuple=False , __a: int=False , __a: Optional[int]=dtypes.floataa , __a: Tuple=True , __a: int=None , )-> Any:
lowerCamelCase : Dict = random_seed.get_seed(__a )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
lowerCamelCase : str = dtypes.as_dtype(__a ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError("""Invalid image dtype %r, expected uint8 or float32""" % dtype )
if fake_data:
lowerCamelCase : Optional[Any] = 10_000
lowerCamelCase : int = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'images.shape: {images.shape} labels.shape: {labels.shape}'
lowerCamelCase : Union[str, Any] = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
lowerCamelCase : List[str] = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
lowerCamelCase : Optional[Any] = images.astype(numpy.floataa )
lowerCamelCase : List[str] = numpy.multiply(__a , 1.0 / 255.0 )
lowerCamelCase : Any = images
lowerCamelCase : List[str] = labels
lowerCamelCase : Dict = 0
lowerCamelCase : Union[str, Any] = 0
@property
def a__ ( self: Tuple )-> List[Any]:
return self._images
@property
def a__ ( self: Optional[int] )-> int:
return self._labels
@property
def a__ ( self: List[Any] )-> Optional[Any]:
return self._num_examples
@property
def a__ ( self: Optional[int] )-> Dict:
return self._epochs_completed
def a__ ( self: Any , __a: str , __a: Optional[int]=False , __a: str=True )-> Optional[int]:
if fake_data:
lowerCamelCase : List[str] = [1] * 784
lowerCamelCase : str = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(__a )],
[fake_label for _ in range(__a )],
)
lowerCamelCase : Dict = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
lowerCamelCase : Union[str, Any] = numpy.arange(self._num_examples )
numpy.random.shuffle(__a )
lowerCamelCase : Any = self.images[perma]
lowerCamelCase : Optional[int] = self.labels[perma]
# Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]


@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath


@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
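
# A minimal usage sketch (hypothetical data directory; assumes the _DataSet /
# _Datasets helpers defined earlier in this module). Not part of the original
# script, just an illustration of the epoch-wrapping behaviour of next_batch:
#
#     mnist = read_data_sets("/tmp/mnist_data", one_hot=True, validation_size=5000)
#     batch_images, batch_labels = mnist.train.next_batch(100)
#
# When fewer than batch_size examples remain in the current epoch, next_batch
# concatenates the tail of this epoch with the head of the (reshuffled) next
# epoch, so every returned batch has exactly batch_size rows.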
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
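
# For reference, a hedged sketch of driving the exporter directly (model name
# and output path are illustrative; `convert` and `quantize` are the same
# transformers.convert_graph_to_onnx helpers imported above):
#
#     from pathlib import Path
#     convert("pt", "bert-base-cased", Path("onnx/bert-base-cased.onnx"), 12)
#     quantize(Path("onnx/bert-base-cased.onnx"))
#
# quantize writes a "-quantized" sibling file, which is exactly the artifact
# whose size the test_quantize_* methods above compare against the original.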
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        # We test on the dev set to compare to benchmarks without submitting to the GLUE server
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )

        return parser


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)


if __name__ == "__main__":
    main()
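
# Example invocation (flag values illustrative; the generic flags such as
# --model_name_or_path and --do_train come from add_generic_args/lightning_base,
# which is not shown in this file):
#
#     python run_pl_glue.py --data_dir ./glue_data/MRPC --task mrpc \
#         --model_name_or_path bert-base-cased --output_dir ./results/mrpc \
#         --max_seq_length 128 --gpus 1 --do_train --do_predict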
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Compute the product u * (u - 1) * ... * (u - p + 1)."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0
    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))
    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
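
# A non-interactive sketch of the same forward-difference scheme, useful for
# testing without stdin. This helper is not part of the original script; it
# assumes equally spaced x values, like main() above.
def newton_forward(x: list[float], y0: list[float], value: float) -> float:
    n = len(x)
    # first column of the difference table holds the known y values
    y = [[0.0] * n for _ in range(n)]
    for j in range(n):
        y[j][0] = y0[j]
    # forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    u = (value - x[0]) / (x[1] - x[0])
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    return summ


# e.g. for f(x) = x**2 sampled at x = [0, 1, 2, 3] (y = [0, 1, 4, 9]),
# newton_forward([0, 1, 2, 3], [0, 1, 4, 9], 1.5) returns 2.25 exactly,
# since the second forward differences of x**2 are constant.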
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
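
# Quick sanity check (illustrative input; the sort both mutates and returns the
# list). Note that the digit extraction int((i / placement) % RADIX) assumes
# non-negative integers:
#
#     radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
#     -> [2, 24, 45, 66, 75, 90, 170, 802]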
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    """
    Expected number of distinct colours when `taken` balls are drawn from
    NUM_BALLS balls (NUM_COLOURS colours, BALLS_PER_COLOUR balls each), via
    linearity of expectation: NUM_COLOURS * P(a given colour appears).
    """
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
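
# Example invocation (script name and paths are illustrative; --data_dir is
# expected to contain {train,val,test}.{source,target} files):
#
#     python pack_dataset.py --tok_name facebook/bart-large-cnn \
#         --max_seq_len 128 --data_dir ./cnn_dm --save_path ./cnn_dm_packed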
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]

    return largest_product(grid)
if __name__ == "__main__":
print(solution())
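
# A small worked check with an illustrative 4x4 grid (not the puzzle's 20x20
# grid.txt): the maximum product of four adjacent numbers is the bottom row,
# 13 * 14 * 15 * 16 = 43680.
#
#     largest_product([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
#     -> 43680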
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
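
# Sketch of the resulting special-token layout (single sequence): XLNet appends
# its special tokens, so build_inputs_with_special_tokens returns
# token_ids_0 + [sep_token_id] + [cls_token_id] -- CLS sits at the *end* of the
# sequence, which is also why create_token_type_ids_from_sequences closes with
# cls_segment_id = [2] rather than opening with it.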
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"
TO_HIGHLIGHT = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R'tfds\.core', R'datasets'),
(R'tf\.io\.gfile\.GFile', R'open'),
(R'tf\.([\w\d]+)', R'datasets.Value(\'\1\')'),
(R'tfds\.features\.Text\(\)', R'datasets.Value(\'string\')'),
(R'tfds\.features\.Text\(', R'datasets.Value(\'string\'),'),
(R'features\s*=\s*tfds.features.FeaturesDict\(', R'features=datasets.Features('),
(R'tfds\.features\.FeaturesDict\(', R'dict('),
(R'The TensorFlow Datasets Authors', R'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(R'tfds\.', R'datasets.'),
(R'dl_manager\.manual_dir', R'self.config.data_dir'),
(R'self\.builder_config', R'self.config'),
]
def convert_command_factory(args):
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dataset_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dataset_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root (where function(x) = 0) using the secant method."""
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
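
# Worked check: the real root of f(x) = x**3 - 2*x - 5 is approximately
# 2.0945515, so intersection(f, 3, 3.5) converges to that value; the loop stops
# once successive iterates differ by less than 10**-5.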
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
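
# A hedged usage sketch (checkpoint name illustrative):
#
#     from transformers import CLIPProcessor
#
#     processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#     # -> BatchEncoding with input_ids, attention_mask and pixel_values,
#     #    ready to be passed to CLIPModel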
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )

        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
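
# Worked check: a root holding 3 coins with two empty children must send one
# coin to each child, so the minimum number of moves is 2:
#
#     distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0)))
#     -> 2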
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ],
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}


class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
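# Minimal usage sketch (hypothetical values, assuming the MCTCTConfig class above):
# a config with two conv layers must supply two kernel sizes and two strides,
# otherwise the consistency check in __init__ raises a ValueError.
#
# config = MCTCTConfig(num_conv_layers=2, conv_kernel=(7, 7), conv_stride=(3, 3))
# bad = MCTCTConfig(num_conv_layers=2, conv_kernel=(7,))  # raises ValueError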
| 2
| 1
|
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of `plain_text` (running sums a and b, mod 65521)."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
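# Minimal usage sketch (assumes the adler32 function above); the result can be
# cross-checked against the zlib implementation in the standard library.
if __name__ == "__main__":
    import zlib

    sample = "Wikipedia"
    # both implementations agree on the checksum of the sample string
    assert adler32(sample) == zlib.adler32(sample.encode("utf-8"))
    print(hex(adler32(sample)))  # 0x11e60398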
| 362
|
import argparse
import struct
import unittest
class SHA256:
    """Class to contain the entire pipeline for SHA-256 hashing."""

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6A09E667,
0xBB67AE85,
0x3C6EF372,
0xA54FF53A,
0x510E527F,
0x9B05688C,
0x1F83D9AB,
0x5BE0CD19,
]
        # Initialize round constants
        self.round_constants = [
0x428A2F98,
0x71374491,
0xB5C0FBCF,
0xE9B5DBA5,
0x3956C25B,
0x59F111F1,
0x923F82A4,
0xAB1C5ED5,
0xD807AA98,
0x12835B01,
0x243185BE,
0x550C7DC3,
0x72BE5D74,
0x80DEB1FE,
0x9BDC06A7,
0xC19BF174,
0xE49B69C1,
0xEFBE4786,
0x0FC19DC6,
0x240CA1CC,
0x2DE92C6F,
0x4A7484AA,
0x5CB0A9DC,
0x76F988DA,
0x983E5152,
0xA831C66D,
0xB00327C8,
0xBF597FC7,
0xC6E00BF3,
0xD5A79147,
0x06CA6351,
0x14292967,
0x27B70A85,
0x2E1B2138,
0x4D2C6DFC,
0x53380D13,
0x650A7354,
0x766A0ABB,
0x81C2C92E,
0x92722C85,
0xA2BFE8A1,
0xA81A664B,
0xC24B8B70,
0xC76C51A3,
0xD192E819,
0xD6990624,
0xF40E3585,
0x106AA070,
0x19A4C116,
0x1E376C08,
0x2748774C,
0x34B0BCB5,
0x391C0CB3,
0x4ED8AA4A,
0x5B9CCA4F,
0x682E6FF3,
0x748F82EE,
0x78A5636F,
0x84C87814,
0x8CC70208,
0x90BEFFFA,
0xA4506CEB,
0xBEF9A3F7,
0xC67178F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit integer `value` by `rotations` bits."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    """Test class for the SHA256 class, using hashlib as the reference."""

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    """
    Provides the 'string' and 'file' options to take input and prints the
    calculated SHA-256 hash.
    """
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
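# Quick sanity check (a sketch assuming the SHA256 class above): the SHA-256
# digest of the empty byte string is a well-known constant.
# >>> SHA256(b"").hash
# 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'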
| 362
| 1
|
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


# Visualizing the Polynomial Regression results
def viz_polynomial():
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
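# Note: `poly_reg.fit_transform([[5.5]])` expands the single position level 5.5
# into the feature row [1, 5.5, 5.5**2, 5.5**3, 5.5**4] (degree=4, with bias term)
# before `pol_reg` predicts, which is why the same transformer must be reused at
# prediction time.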
| 715
|
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}


class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
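# Design note: `prediction_step` pads both the generated tokens and the labels up
# to `gen_kwargs["max_length"]`, so per-batch tensors stay concatenable across
# evaluation steps even when generation stops early for some batches.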
| 93
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
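# Usage sketch (hypothetical values, mirroring other HF vision configs):
# config = Data2VecVisionConfig(image_size=384, drop_path_rate=0.2)
# config.num_hidden_layers  # -> 12, the default defined above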
| 40
|
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as dictionary of adjacency lists."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Run a breadth first search from the source vertex, filling `self.parent`."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the path from the source vertex to `target_vertex` as `v1->v2->...`."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
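# Expected behavior of the demo above (source vertex "G"):
#   g.shortest_path("D")   -> "G->C->A->B->D"
#   g.shortest_path("G")   -> "G"
#   g.shortest_path("Foo") -> raises ValueError: No path from vertex: G to vertex: Foo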
| 246
| 0
|
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Compute one of voltage, current, or power, given the other two values."""
    result = namedtuple("result", "name value")
if (voltage, current, power).count(0) != 1:
raise ValueError("Only one argument must be 0")
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system")
elif voltage == 0:
return result("voltage" , power / current)
elif current == 0:
return result("current" , power / voltage)
elif power == 0:
return result("power" , float(round(abs(voltage * current) , 2)))
else:
raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
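# Doctest-style sketch for the function above:
# >>> electric_power(voltage=0, current=2, power=5)
# result(name='voltage', value=2.5)
# >>> electric_power(voltage=2, current=2, power=0)
# result(name='power', value=4.0)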
| 683
|
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
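# Design note: each list element lives in its own process and only talks to its
# immediate neighbours through Pipes guarded by a single global lock, so every
# pass exchanges O(n) values; the worker loop is hard-coded to 10 passes, which
# matches the 10-element demo list built in main().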
| 683
| 1
|
'''simple docstring'''
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return all indices in `s` at which `pattern` starts as a substring."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search("ABCDEFG", "DE") == [3]
print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
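# Complexity note: the scan above is O(len(s) * len(pattern)) in the worst case,
# e.g. naive_pattern_search("AAAAAA", "AAB") compares nearly every window in full.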
| 697
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty list for both the document and the summary."""
        raw_story = ''
        document_lines, summary_lines = process_story(raw_story)
        self.assertEqual(document_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_highlights(self):
        raw_story = (
            'It was the year of Our Lord one thousand seven hundred and '
            'seventy-five\n\nSpiritual revelations were conceded to England '
            'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
        )
        document_lines, summary_lines = process_story(raw_story)

        expected_document_lines = [
            'It was the year of Our Lord one thousand seven hundred and seventy-five.',
            'Spiritual revelations were conceded to England at that favoured period, as at this.',
        ]
        self.assertEqual(document_lines, expected_document_lines)

        expected_summary_lines = ['It was the best of times.']
        self.assertEqual(summary_lines, expected_summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
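# Note: `compute_token_type_ids` flips the segment id at every separator token,
# which is why the third row above ([1, 101, 3, 4, 101, 6]) alternates 1 -> 0 -> 1.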
| 697
| 1
|
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of a float, or its derivative if `deriv` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after the forward propagation training."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
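# Doctest-style sketch: with enough propagations the output converges towards
# `expected` (within noise from the random initial weight), e.g.
# >>> res = forward_propagation(32, 450_000)
# >>> 31 < res < 33
# True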
| 714
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
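# Usage sketch (hypothetical values): `channel_shrink_ratio` and
# `max_2d_position_embeddings` are the LiLT-specific layout knobs on top of the
# usual RoBERTa-style text parameters.
# config = LiltConfig(channel_shrink_ratio=4, max_2d_position_embeddings=1024)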
| 76
| 0
|
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
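# Design note: with `DummyObject` as the metaclass, importing this placeholder
# never fails -- `requires_backends` only raises (with an installation hint) once
# the class is instantiated or its classmethods are called without `torch` and
# `torchsde` installed.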
| 493
|
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 5_0358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 493
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 704
|
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)


WHISPER_MAPPING = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str) -> bytes:
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)

    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model."
        )

    return model_bytes


def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_state"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
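# Usage sketch (hypothetical invocation; checkpoint names come from the _MODELS
# table above, and a path to a raw ".pt" file is also accepted):
# python convert_openai_whisper_to_hf.py --checkpoint_path tiny \
#     --pytorch_dump_folder_path ./whisper-tiny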
| 96
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
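# Usage sketch: `accelerate test --config_file path/to/config.yaml` resolves, via
# execute_subprocess_async above, to running
# `accelerate-launch --config_file=path/to/config.yaml <repo>/test_utils/scripts/test_script.py`.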
| 80
|
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]

    return arr
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
print(f"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
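# Invariant note: after n passes over an n-element list the list is sorted, which
# is why the outer loop runs len(arr) times, e.g.
# >>> odd_even_transposition([5, 4, 3, 2, 1])
# [1, 2, 3, 4, 5]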
| 187
| 0
|
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
def snake_case_ ( lowercase__ : Union[tf.Tensor, np.ndarray] ):
'''simple docstring'''
if isinstance(lowercase__ , np.ndarray ):
return list(tensor.shape )
_lowerCAmelCase =tf.shape(lowercase__ )
if tensor.shape == tf.TensorShape(lowercase__ ):
return dynamic
_lowerCAmelCase =tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(lowercase__ )]
def snake_case_ ( lowercase__ : tf.Tensor , lowercase__ : Optional[int] = None , lowercase__ : Optional[str] = None ):
'''simple docstring'''
return tf.nn.softmax(logits=logits + 1e-9 , axis=lowercase__ , name=lowercase__ )
def snake_case_ ( lowercase__ : List[str] , lowercase__ : Tuple , lowercase__ : Optional[Any] , lowercase__ : Optional[Any]=1e-5 , lowercase__ : str=-1 ):
'''simple docstring'''
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(lowercase__ , lowercase__ ):
raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" )
# Get mean and variance on the axis to be normalized
_lowerCAmelCase , _lowerCAmelCase =tf.nn.moments(lowercase__ , axes=[axis] , keepdims=lowercase__ )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
_lowerCAmelCase =[1] * inputs.shape.rank
_lowerCAmelCase =shape_list(lowercase__ )[axis]
_lowerCAmelCase =tf.reshape(lowercase__ , lowercase__ )
_lowerCAmelCase =tf.reshape(lowercase__ , lowercase__ )
# Compute layer normalization using the batch_normalization
# function.
_lowerCAmelCase =tf.nn.batch_normalization(
lowercase__ , lowercase__ , lowercase__ , offset=lowercase__ , scale=lowercase__ , variance_epsilon=lowercase__ , )
return outputs
def snake_case_ ( lowercase__ : str , lowercase__ : Optional[Any]=0 , lowercase__ : str=-1 ):
'''simple docstring'''
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
_lowerCAmelCase =tf.shape(lowercase__ )
_lowerCAmelCase =tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
_lowerCAmelCase =tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(lowercase__ , lowercase__ )
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    """Turn a 2D or 3D attention mask into the additive 4D mask expected by attention layers."""
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask
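if __name__ == "__main__":
    # Inline illustration: a 2D padding mask becomes an additive 4D mask in
    # which attended positions map to 0 and padded positions to dtype.min.
    mask = tf.constant([[1.0, 1.0, 0.0]])
    print(invert_attention_mask(mask).shape)  # (1, 1, 1, 3)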
def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    """Assert that every token id in `tensor` is a valid index into an embedding layer of size `embed_dim`."""
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )
def save_attributes_to_hdf5_group(group, name, data):
    """Save attributes to an HDF5 group, chunking them when they exceed the HDF5 object-header limit."""
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    """Load (possibly chunked) attributes from an HDF5 group, decoding bytes to str where needed."""
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data
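if __name__ == "__main__":
    # Hypothetical round trip through an HDF5 file (assumes the h5py package,
    # which the TF weight-saving code relies on; file name chosen arbitrarily).
    import h5py

    with h5py.File("demo_weights.h5", "w") as f:
        layer = f.create_group("dense")
        save_attributes_to_hdf5_group(layer, "weight_names", ["kernel:0", "bias:0"])
    with h5py.File("demo_weights.h5", "r") as f:
        print(load_attributes_from_hdf5_group(f["dense"], "weight_names"))  # ['kernel:0', 'bias:0']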
def expand_1d(data):
    """Expand all 1-dimensional tensors in a nested structure to 2 dimensions."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
| 149
|
import inspect
import unittest

from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
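if __name__ == "__main__":
    # Standalone sketch of what the slow integration test above exercises
    # (requires network access to the Hugging Face Hub plus the jax/flax and
    # vision extras; not part of the test suite itself).
    model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    image_processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    inputs = image_processor(images=prepare_img(), return_tensors="np")
    logits = model(**inputs).logits
    print(int(logits.argmax(-1)[0]))  # predicted ImageNet-1k class index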
| 149
| 1
|
import collections
import os
import re
from pathlib import Path


PATH_TO_TRANSFORMERS = "src/transformers"


# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Read an init file and parse, per backend, the objects defined in `_import_structure` and under TYPE_CHECKING."""
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
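if __name__ == "__main__":
    # Inline illustration on hypothetical parse results: an object registered
    # in _import_structure but missing from TYPE_CHECKING is reported.
    import_dict = {"none": ["Foo"], "torch": ["Bar"]}
    type_hints = {"none": ["Foo"], "torch": []}
    print(analyze_results(import_dict, type_hints))
    # -> ['Differences for torch backend:', '  Bar in _import_structure but not in TYPE_HINT.']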
def check_all_inits():
    """Check all inits in the repo and raise if one does not define the same objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Return the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]


def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
        import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
| 392
|
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
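if __name__ == "__main__":
    # Sketch of where the hard-coded ids above come from (requires the
    # sentencepiece extra and Hub access; exact ids depend on the checkpoint).
    from transformers import XLMRobertaTokenizer

    tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    print(tokenizer("The dog is cute and lives in the garden house")["input_ids"])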
| 392
| 1
|
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"""files""" , [
["""full:README.md""", """dataset_infos.json"""],
["""empty:README.md""", """dataset_infos.json"""],
["""dataset_infos.json"""],
["""full:README.md"""],
] , )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
snake_case_ : Optional[Any] = tmp_path_factory.mktemp("""dset_infos_dir""" )
if "full:README.md" in files:
with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
f.write("""---\ndataset_info:\n dataset_size: 42\n---""" )
if "empty:README.md" in files:
with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
f.write("""""" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / """dataset_infos.json""" , """w""" ) as f:
f.write("""{\"default\": {\"dataset_size\": 42}}""" )
snake_case_ : Dict = DatasetInfosDict.from_directory(snake_case__ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 4_2
@pytest.mark.parametrize(
"""dataset_info""" , [
DatasetInfo(),
DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , ),
] , )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : DatasetInfo ):
"""simple docstring"""
snake_case_ : str = str(snake_case__ )
dataset_info.write_to_directory(snake_case__ )
snake_case_ : List[Any] = DatasetInfo.from_directory(snake_case__ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(snake_case__ , """dataset_info.json""" ) )
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : Tuple = DatasetInfo(
description="""foo""" , citation="""bar""" , homepage="""https://foo.bar""" , license="""CC0""" , features=Features({"""a""": Value("""int32""" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train""", """num_examples""": 4_2}] , download_checksums={} , download_size=1_3_3_7 , post_processing_size=4_4_2 , dataset_size=1_2_3_4 , size_in_bytes=1_3_3_7 + 4_4_2 + 1_2_3_4 , )
snake_case_ : List[str] = dataset_info._to_yaml_dict()
assert sorted(snake_case__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
snake_case_ : Optional[Any] = yaml.safe_dump(snake_case__ )
snake_case_ : str = yaml.safe_load(snake_case__ )
assert dataset_info_yaml_dict == reloaded
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : Tuple = DatasetInfo()
snake_case_ : int = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"""dataset_infos_dict""" , [
DatasetInfosDict(),
DatasetInfosDict({"""default""": DatasetInfo()} ),
DatasetInfosDict({"""my_config_name""": DatasetInfo()} ),
DatasetInfosDict(
{
"""default""": DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , )
} ),
DatasetInfosDict(
{
"""v1""": DatasetInfo(dataset_size=4_2 ),
"""v2""": DatasetInfo(dataset_size=1_3_3_7 ),
} ),
] , )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : DatasetInfosDict ):
"""simple docstring"""
snake_case_ : Optional[int] = str(snake_case__ )
dataset_infos_dict.write_to_directory(snake_case__ )
snake_case_ : List[str] = DatasetInfosDict.from_directory(snake_case__ )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
snake_case_ : Tuple = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
snake_case_ : Union[str, Any] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(snake_case__ , """README.md""" ) )
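if __name__ == "__main__":
    # Illustration of the README front matter these tests write: stripped of
    # its `---` fences it is plain YAML that safe_load parses into a dict.
    front_matter = "dataset_info:\n  dataset_size: 42"
    print(yaml.safe_load(front_matter))  # {'dataset_info': {'dataset_size': 42}}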
| 716
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a_ = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : int = ["""pixel_values"""]
def __init__(self , lowercase__ = True , lowercase__ = None , lowercase__ = 0.9 , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = True , lowercase__ = None , lowercase__ = 1 / 2_55 , lowercase__ = True , lowercase__ = True , lowercase__ = None , lowercase__ = None , **lowercase__ , ):
super().__init__(**lowercase__ )
snake_case_ : Tuple = size if size is not None else {"""shortest_edge""": 2_24}
snake_case_ : Union[str, Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
snake_case_ : str = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
snake_case_ : Dict = get_size_dict(lowercase__ , param_name="""crop_size""" )
snake_case_ : Union[str, Any] = do_resize
snake_case_ : List[str] = size
snake_case_ : str = crop_pct
snake_case_ : str = resample
snake_case_ : Optional[Any] = do_center_crop
snake_case_ : Dict = crop_size
snake_case_ : int = do_rescale
snake_case_ : Optional[int] = rescale_factor
snake_case_ : str = do_normalize
snake_case_ : str = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
snake_case_ : List[str] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = None , **lowercase__ , ):
snake_case_ : Tuple = get_size_dict(lowercase__ , default_to_square=lowercase__ )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f'size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
if crop_pct is not None:
if "shortest_edge" in size:
snake_case_ : Optional[int] = int(size["""shortest_edge"""] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
snake_case_ : Dict = int(size["""height"""] / crop_pct )
else:
snake_case_ : List[str] = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct ))
else:
raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) )
snake_case_ : List[Any] = get_resize_output_image_size(lowercase__ , size=lowercase__ , default_to_square=lowercase__ )
else:
if "shortest_edge" in size:
snake_case_ : Optional[int] = get_resize_output_image_size(lowercase__ , size=size["""shortest_edge"""] , default_to_square=lowercase__ )
elif "height" in size and "width" in size:
snake_case_ : int = (size["""height"""], size["""width"""])
else:
raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) )
return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
snake_case_ : int = get_size_dict(lowercase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'size must contain \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(lowercase__ , size=(size["""height"""], size["""width"""]) , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ):
snake_case_ : str = do_resize if do_resize is not None else self.do_resize
snake_case_ : Any = crop_pct if crop_pct is not None else self.crop_pct
snake_case_ : List[Any] = resample if resample is not None else self.resample
snake_case_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ : str = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ : str = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ : List[Any] = image_mean if image_mean is not None else self.image_mean
snake_case_ : int = image_std if image_std is not None else self.image_std
snake_case_ : List[Any] = size if size is not None else self.size
snake_case_ : Optional[Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
snake_case_ : List[Any] = crop_size if crop_size is not None else self.crop_size
snake_case_ : int = get_size_dict(lowercase__ , param_name="""crop_size""" )
snake_case_ : List[str] = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_pct is None:
raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case_ : int = [to_numpy_array(lowercase__ ) for image in images]
if do_resize:
snake_case_ : str = [self.resize(image=lowercase__ , size=lowercase__ , crop_pct=lowercase__ , resample=lowercase__ ) for image in images]
if do_center_crop:
snake_case_ : Optional[int] = [self.center_crop(image=lowercase__ , size=lowercase__ ) for image in images]
if do_rescale:
snake_case_ : List[Any] = [self.rescale(image=lowercase__ , scale=lowercase__ ) for image in images]
if do_normalize:
snake_case_ : Optional[Any] = [self.normalize(image=lowercase__ , mean=lowercase__ , std=lowercase__ ) for image in images]
snake_case_ : List[Any] = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
snake_case_ : Dict = {"""pixel_values""": images}
return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
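if __name__ == "__main__":
    # Minimal usage sketch; the input array is synthetic, and the class name
    # above is restored from context (this code mirrors the PoolFormer
    # image processor, with crop_pct-scaled resize followed by a center crop).
    image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
    processor = PoolFormerImageProcessor()
    batch = processor.preprocess(image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)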
| 48
| 0