"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
T5Config,
T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Any = '''https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'''
_lowerCamelCase : str = Image.open(requests.get(A_, stream=A_ ).raw ).convert('''RGB''' )
return image
def snake_case_ ( A_ : Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.embeddings.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.embeddings.layernorm.bias''') )
# fmt: on
return rename_keys
def snake_case_ ( A_ : str, A_ : Optional[Any], A_ : Dict ):
'''simple docstring'''
_lowerCamelCase : List[Any] = dct.pop(A_ )
_lowerCamelCase : Dict = val
def snake_case_ ( A_ : List[Any], A_ : List[Any] ):
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
_lowerCamelCase : Tuple = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
_lowerCamelCase : Any = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
_lowerCamelCase : Union[str, Any] = torch.cat((q_bias, torch.zeros_like(A_, requires_grad=A_ ), v_bias) )
_lowerCamelCase : Any = qkv_bias
def snake_case_ ( A_ : int ):
'''simple docstring'''
_lowerCamelCase : Tuple = 3_64 if '''coco''' in model_name else 2_24
_lowerCamelCase : Optional[int] = InstructBlipVisionConfig(image_size=A_ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
_lowerCamelCase : List[str] = T5Config.from_pretrained('''google/flan-t5-xl''', dense_act_fn='''gelu''', bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
_lowerCamelCase : int = T5Config.from_pretrained('''google/flan-t5-xxl''', dense_act_fn='''gelu''', bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
_lowerCamelCase : int = LlamaConfig.from_pretrained('''decapoda-research/llama-7b-hf''', vocab_size=3_20_01 ).to_dict()
elif "vicuna-13b" in model_name:
_lowerCamelCase : Dict = LlamaConfig.from_pretrained('''decapoda-research/llama-13b-hf''', vocab_size=3_20_01 ).to_dict()
else:
raise ValueError('''Model name not supported''' )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
_lowerCamelCase : Any = InstructBlipQFormerConfig(vocab_size=3_05_23 ).to_dict()
_lowerCamelCase : List[Any] = InstructBlipConfig(vision_config=A_, text_config=A_, qformer_config=A_ )
return config, image_size
@torch.no_grad()
def snake_case_ ( A_ : str, A_ : Any=None, A_ : str=False ):
'''simple docstring'''
_lowerCamelCase : Dict = AutoTokenizer.from_pretrained('''bert-base-uncased''', truncation_side='''left''' )
qformer_tokenizer.add_special_tokens({'''bos_token''': '''[DEC]'''} )
if "t5" in model_name:
_lowerCamelCase : Dict = T5TokenizerFast.from_pretrained('''google/flan-t5-xl''', truncation_side='''left''' )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
_lowerCamelCase : Optional[int] = LlamaTokenizerFast.from_pretrained(
'''huggyllama/llama-7b''', truncation_side='''left''', bos_token='''</s>''', unk_token='''</s>''' )
tokenizer.add_special_tokens({'''pad_token''': '''[PAD]'''} )
_lowerCamelCase , _lowerCamelCase : Dict = get_blipa_config(A_ )
_lowerCamelCase : Dict = InstructBlipForConditionalGeneration(A_ ).eval()
_lowerCamelCase : Optional[int] = {
'''instructblip-vicuna-7b''': ('''blip2_vicuna_instruct''', '''vicuna7b'''),
'''instructblip-vicuna-13b''': ('''blip2_vicuna_instruct''', '''vicuna13b'''),
'''instructblip-flan-t5-xl''': ('''blip2_t5_instruct''', '''flant5xl'''),
'''instructblip-flan-t5-xxl''': ('''blip2_t5_instruct''', '''flant5xxl'''),
}
_lowerCamelCase , _lowerCamelCase : Any = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
_lowerCamelCase : Tuple = '''cuda:1''' if torch.cuda.is_available() else '''cpu'''
_lowerCamelCase : Dict = '''cuda:2''' if torch.cuda.is_available() else '''cpu'''
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = load_model_and_preprocess(
name=A_, model_type=A_, is_eval=A_, device=A_ )
original_model.eval()
print('''Done!''' )
# update state dict keys
_lowerCamelCase : Tuple = original_model.state_dict()
_lowerCamelCase : Tuple = create_rename_keys(A_ )
for src, dest in rename_keys:
rename_key(A_, A_, A_ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_lowerCamelCase : Tuple = state_dict.pop(A_ )
if key.startswith('''Qformer.bert''' ):
_lowerCamelCase : str = key.replace('''Qformer.bert''', '''qformer''' )
if "attention.self" in key:
_lowerCamelCase : List[Any] = key.replace('''self''', '''attention''' )
if "llm_proj" in key:
_lowerCamelCase : Dict = key.replace('''llm_proj''', '''language_projection''' )
if "t5_proj" in key:
_lowerCamelCase : List[Any] = key.replace('''t5_proj''', '''language_projection''' )
if key.startswith('''llm_model''' ):
_lowerCamelCase : Optional[int] = key.replace('''llm_model''', '''language_model''' )
if key.startswith('''t5''' ):
_lowerCamelCase : List[Any] = key.replace('''t5''', '''language''' )
_lowerCamelCase : List[Any] = val
# read in qv biases
read_in_q_v_bias(A_, A_ )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(A_, strict=A_ )
_lowerCamelCase : str = load_demo_image()
_lowerCamelCase : Union[str, Any] = '''What is unusual about this image?'''
# create processor
_lowerCamelCase : Tuple = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size}, image_mean=A_, image_std=A_ )
_lowerCamelCase : List[str] = InstructBlipProcessor(
image_processor=A_, tokenizer=A_, qformer_tokenizer=A_, )
_lowerCamelCase : Dict = processor(images=A_, text=A_, return_tensors='''pt''' ).to(A_ )
# make sure processor creates exact same pixel values
_lowerCamelCase : Optional[Any] = vis_processors['''eval'''](A_ ).unsqueeze(0 ).to(A_ )
_lowerCamelCase : List[Any] = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ), A_ )
original_model.to(A_ )
hf_model.to(A_ )
with torch.no_grad():
if "vicuna" in model_name:
_lowerCamelCase : List[Any] = original_model({'''image''': original_pixel_values, '''text_input''': [prompt]} ).logits
_lowerCamelCase : List[Any] = hf_model(**A_ ).logits
else:
_lowerCamelCase : Tuple = original_model(
{'''image''': original_pixel_values, '''text_input''': [prompt], '''text_output''': ['''\n''']} ).logits
_lowerCamelCase : str = tokenizer('''\n''', return_tensors='''pt''' ).input_ids.to(A_ )
_lowerCamelCase : Optional[Any] = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -1_00 )
_lowerCamelCase : Any = hf_model(**A_, labels=A_ ).logits
print('''First values of original logits:''', original_logits[0, :3, :3] )
print('''First values of HF logits:''', logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
_lowerCamelCase : Tuple = 1E-4 if '''vicuna''' in model_name else 1E-5
assert torch.allclose(original_logits.to(logits.device ), A_, atol=A_ )
print('''Looks ok!''' )
print('''Generating with original model...''' )
_lowerCamelCase : str = original_model.generate({'''image''': original_pixel_values, '''prompt''': prompt}, num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print('''Generating with HF model...''' )
_lowerCamelCase : Tuple = hf_model.generate(
**A_, do_sample=A_, num_beams=5, max_length=2_56, min_length=1, top_p=0.9, repetition_penalty=1.5, length_penalty=1.0, temperature=1, )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
_lowerCamelCase : Union[str, Any] = 2
print('''Original generation:''', A_ )
_lowerCamelCase : Dict = processor.batch_decode(A_, skip_special_tokens=A_ )
_lowerCamelCase : str = [text.strip() for text in output_text]
print('''HF generation:''', A_ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(A_ )
hf_model.save_pretrained(A_ )
if push_to_hub:
processor.push_to_hub(F'''Salesforce/{model_name}''' )
hf_model.push_to_hub(F'''Salesforce/{model_name}''' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
lowerCAmelCase__ = [
'''instructblip-vicuna-7b''',
'''instructblip-vicuna-13b''',
'''instructblip-flan-t5-xl''',
'''instructblip-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''instructblip-flan-t5-xl''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
lowerCAmelCase__ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import os


def solution(filename: str = "matrix.txt") -> int:
    """
    Return the minimal path sum through the matrix, moving only right and down.
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for _ in range(n)] for _ in range(n)]
    dp[0][0] = grid[0][0]

    # the first row and first column can only be reached from one direction
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    # every other cell is reached from the cheaper of its top/left neighbours
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]


if __name__ == "__main__":
    print(f"{solution() = }")
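
# --- Added example (not part of the original Project Euler solution) ---
# A quick sanity check of the same right/down dynamic programme on a small
# in-memory grid, so the recurrence can be exercised without matrix.txt.
# The 3x3 grid and its expected minimal path sum of 7 are illustrative values.
def min_path_sum(grid: list[list[int]]) -> int:
    n = len(grid)
    dp = [[0] * n for _ in range(n)]
    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]


assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7  # path 1 -> 3 -> 1 -> 1 -> 1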
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
__lowerCAmelCase : List[str] = (
"""This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"""
)
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
warnings.warn(_lowerCamelCase , _lowerCamelCase )
requires_backends(_lowerCamelCase , """sklearn""" )
return (preds == labels).mean()
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
warnings.warn(_lowerCamelCase , _lowerCamelCase )
requires_backends(_lowerCamelCase , """sklearn""" )
lowerCAmelCase__ = simple_accuracy(_lowerCamelCase , _lowerCamelCase )
lowerCAmelCase__ = fa_score(y_true=_lowerCamelCase , y_pred=_lowerCamelCase )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
warnings.warn(_lowerCamelCase , _lowerCamelCase )
requires_backends(_lowerCamelCase , """sklearn""" )
lowerCAmelCase__ = pearsonr(_lowerCamelCase , _lowerCamelCase )[0]
lowerCAmelCase__ = spearmanr(_lowerCamelCase , _lowerCamelCase )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
warnings.warn(_lowerCamelCase , _lowerCamelCase )
requires_backends(_lowerCamelCase , """sklearn""" )
assert len(_lowerCamelCase ) == len(_lowerCamelCase ), f"""Predictions and labels have mismatched lengths {len(_lowerCamelCase )} and {len(_lowerCamelCase )}"""
if task_name == "cola":
return {"mcc": matthews_corrcoef(_lowerCamelCase , _lowerCamelCase )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(_lowerCamelCase , _lowerCamelCase )}
elif task_name == "mrpc":
return acc_and_fa(_lowerCamelCase , _lowerCamelCase )
elif task_name == "sts-b":
return pearson_and_spearman(_lowerCamelCase , _lowerCamelCase )
elif task_name == "qqp":
return acc_and_fa(_lowerCamelCase , _lowerCamelCase )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(_lowerCamelCase , _lowerCamelCase )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(_lowerCamelCase , _lowerCamelCase )}
elif task_name == "qnli":
return {"acc": simple_accuracy(_lowerCamelCase , _lowerCamelCase )}
elif task_name == "rte":
return {"acc": simple_accuracy(_lowerCamelCase , _lowerCamelCase )}
elif task_name == "wnli":
return {"acc": simple_accuracy(_lowerCamelCase , _lowerCamelCase )}
elif task_name == "hans":
return {"acc": simple_accuracy(_lowerCamelCase , _lowerCamelCase )}
else:
raise KeyError(_lowerCamelCase )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
warnings.warn(_lowerCamelCase , _lowerCamelCase )
requires_backends(_lowerCamelCase , """sklearn""" )
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError(f"""Predictions and labels have mismatched lengths {len(_lowerCamelCase )} and {len(_lowerCamelCase )}""" )
if task_name == "xnli":
return {"acc": simple_accuracy(_lowerCamelCase , _lowerCamelCase )}
else:
raise KeyError(_lowerCamelCase )
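
# --- Added usage sketch (assumes scikit-learn and scipy are installed) ---
# Illustrates the expected call shape of the helpers above; the arrays below
# are made-up class ids, not real model predictions.
import numpy as np

example_preds = np.array([1, 0, 1, 1])
example_labels = np.array([1, 0, 0, 1])

print(glue_compute_metrics("sst-2", example_preds, example_labels))  # {'acc': 0.75}
print(glue_compute_metrics("mrpc", example_preds, example_labels))   # accuracy, F1 and their mean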
"""simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : Optional[Any] = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : Tuple = PegasusTokenizer
UpperCamelCase_ : Any = PegasusTokenizerFast
UpperCamelCase_ : int = True
UpperCamelCase_ : Any = True
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ = PegasusTokenizer(snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **snake_case__ : Optional[Any] ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Optional[Any] ):
return ("This is a test", "This is a test")
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = """</s>"""
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(snake_case__ ) , 1103 )
def _SCREAMING_SNAKE_CASE ( self : Any ):
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
lowerCAmelCase__ = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
lowerCAmelCase__ = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowerCAmelCase__ = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
lowerCAmelCase__ = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
lowerCAmelCase__ = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
lowerCAmelCase__ = """To ensure a smooth flow of bank resolutions."""
lowerCAmelCase__ = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
lowerCAmelCase__ = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = ["""This is going to be way too long.""" * 150, """short example"""]
lowerCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""]
lowerCAmelCase__ = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
lowerCAmelCase__ = self._large_tokenizer(
text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case__ ) == 2 # input_ids, attention_mask.
@slow
def _SCREAMING_SNAKE_CASE ( self : str ):
# fmt: off
lowerCAmelCase__ = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : str = PegasusTokenizer
UpperCamelCase_ : Optional[int] = PegasusTokenizerFast
UpperCamelCase_ : Union[str, Any] = True
UpperCamelCase_ : Optional[int] = True
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ = PegasusTokenizer(snake_case__ , offset=0 , mask_token_sent=snake_case__ , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Dict ):
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **snake_case__ : List[Any] ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Dict ):
return ("This is a test", "This is a test")
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
lowerCAmelCase__ = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
lowerCAmelCase__ = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = ["""This is going to be way too long.""" * 1000, """short example"""]
lowerCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""]
lowerCAmelCase__ = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
lowerCAmelCase__ = self._large_tokenizer(
text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case__ ) == 2 # input_ids, attention_mask.
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
lowerCAmelCase__ = self._large_tokenizer(snake_case__ ).input_ids
self.assertListEqual(
snake_case__ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier


def data_handling(data: dict) -> tuple:
    # Split the dataset bunch into feature and target arrays
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the Iris dataset and split it into train/test sets
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)
    names = iris["target_names"]

    # Create an XGBoost classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier on the test set
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
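
# --- Added follow-up sketch (illustrative, not part of the original script) ---
# The fitted XGBClassifier follows the scikit-learn estimator API, so it can also
# be queried and scored directly, without the confusion-matrix plot.
def prediction_example() -> None:
    iris = load_iris()
    x_train, x_test, y_train, y_test = train_test_split(*data_handling(iris), test_size=0.25)
    fitted = xgboost(x_train, y_train)
    print(fitted.predict(x_test[:3]))  # predicted class ids for three held-out samples
    print(fitted.score(x_test, y_test))  # mean accuracy on the held-out set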
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
snake_case_ : Optional[Any] = R'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(UpperCamelCase__ )
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = "rag"
UpperCAmelCase = True
def __init__( self : Tuple , _a : List[Any]=None , _a : Tuple=True , _a : Optional[Any]=None , _a : int=None , _a : List[str]=None , _a : int=None , _a : Optional[int]=None , _a : str=" / " , _a : Any=" // " , _a : Optional[Any]=5 , _a : int=300 , _a : Optional[Any]=768 , _a : Any=8 , _a : List[str]="wiki_dpr" , _a : Dict="train" , _a : Union[str, Any]="compressed" , _a : str=None , _a : Union[str, Any]=None , _a : int=False , _a : Any=False , _a : Any=0.0 , _a : Any=True , _a : List[str]=False , _a : Optional[int]=False , _a : int=False , _a : Union[str, Any]=True , _a : Optional[int]=None , **_a : List[str] , ) -> List[Any]:
"""simple docstring"""
super().__init__(
bos_token_id=_a , pad_token_id=_a , eos_token_id=_a , decoder_start_token_id=_a , forced_eos_token_id=_a , is_encoder_decoder=_a , prefix=_a , vocab_size=_a , **_a , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_SCREAMING_SNAKE_CASE =kwargs.pop('''question_encoder''' )
_SCREAMING_SNAKE_CASE =question_encoder_config.pop('''model_type''' )
_SCREAMING_SNAKE_CASE =kwargs.pop('''generator''' )
_SCREAMING_SNAKE_CASE =decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
_SCREAMING_SNAKE_CASE =AutoConfig.for_model(_a , **_a )
_SCREAMING_SNAKE_CASE =AutoConfig.for_model(_a , **_a )
_SCREAMING_SNAKE_CASE =reduce_loss
_SCREAMING_SNAKE_CASE =label_smoothing
_SCREAMING_SNAKE_CASE =exclude_bos_score
_SCREAMING_SNAKE_CASE =do_marginalize
_SCREAMING_SNAKE_CASE =title_sep
_SCREAMING_SNAKE_CASE =doc_sep
_SCREAMING_SNAKE_CASE =n_docs
_SCREAMING_SNAKE_CASE =max_combined_length
_SCREAMING_SNAKE_CASE =dataset
_SCREAMING_SNAKE_CASE =dataset_split
_SCREAMING_SNAKE_CASE =index_name
_SCREAMING_SNAKE_CASE =retrieval_vector_size
_SCREAMING_SNAKE_CASE =retrieval_batch_size
_SCREAMING_SNAKE_CASE =passages_path
_SCREAMING_SNAKE_CASE =index_path
_SCREAMING_SNAKE_CASE =use_dummy_dataset
_SCREAMING_SNAKE_CASE =output_retrieved
_SCREAMING_SNAKE_CASE =do_deduplication
_SCREAMING_SNAKE_CASE =use_cache
if self.forced_eos_token_id is None:
_SCREAMING_SNAKE_CASE =getattr(self.generator , '''forced_eos_token_id''' , _a )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , _a : PretrainedConfig , _a : PretrainedConfig , **_a : Dict ) -> PretrainedConfig:
"""simple docstring"""
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **_a )
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =copy.deepcopy(self.__dict__ )
_SCREAMING_SNAKE_CASE =self.question_encoder.to_dict()
_SCREAMING_SNAKE_CASE =self.generator.to_dict()
_SCREAMING_SNAKE_CASE =self.__class__.model_type
return output
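
# --- Added usage sketch (assumes the class above corresponds to transformers' RagConfig,
# as its docstring states, and that DPR/BART sub-configs are suitable illustrative choices) ---
# RagConfig is composed from a question-encoder config and a generator config,
# passed as plain dicts that carry their "model_type" entries.
from transformers import BartConfig, DPRConfig, RagConfig

question_encoder_cfg = DPRConfig().to_dict()
generator_cfg = BartConfig().to_dict()
rag_config = RagConfig(question_encoder=question_encoder_cfg, generator=generator_cfg, n_docs=5)
print(rag_config.n_docs, rag_config.index_name)  # 5 compressed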
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def _a ( ):
"""simple docstring"""
UpperCAmelCase = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch """
"""helper utility that will spawn up """
"""multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=__A , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=__A , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=__A )
return parser.parse_args()
def _a ( ):
"""simple docstring"""
UpperCAmelCase = parse_args()
# Import training_script as a module.
UpperCAmelCase = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
UpperCAmelCase = script_fpath.stem
UpperCAmelCase = importlib.import_module(__A )
# Patch sys.argv
UpperCAmelCase = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
"""simple docstring"""
from math import sqrt
def _a ( _snake_case = 100_0000 ):
"""simple docstring"""
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_snake_case , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
__magic_name__ = logging.getLogger(__name__)
__magic_name__ = "Hello world! cécé herlolip"
__magic_name__ = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__ ) -> Any:
'''simple docstring'''
a__ = BertAbsConfig(
temp_dir='.',finetune_bert=UpperCAmelCase__,large=UpperCAmelCase__,share_emb=UpperCAmelCase__,use_bert_emb=UpperCAmelCase__,encoder='bert',max_pos=5_12,enc_layers=6,enc_hidden_size=5_12,enc_heads=8,enc_ff_size=5_12,enc_dropout=0.2,dec_layers=6,dec_hidden_size=7_68,dec_heads=8,dec_ff_size=20_48,dec_dropout=0.2,)
a__ = torch.load(UpperCAmelCase__,lambda UpperCAmelCase__,UpperCAmelCase__ : storage )
a__ = AbsSummarizer(UpperCAmelCase__,torch.device('cpu' ),UpperCAmelCase__ )
original.eval()
a__ = BertAbsSummarizer(UpperCAmelCase__,torch.device('cpu' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('convert the model' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outputs are identical
# ----------------------------------
logging.info('Make sure that the models\' outputs are identical' )
a__ = BertTokenizer.from_pretrained('bert-base-uncased' )
# prepare the model inputs
a__ = tokenizer.encode('This is sample éàalj\'-.' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(UpperCAmelCase__ )) )
a__ = torch.tensor(UpperCAmelCase__ ).unsqueeze(0 )
a__ = tokenizer.encode('This is sample 3 éàalj\'-.' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(UpperCAmelCase__ )) )
a__ = torch.tensor(UpperCAmelCase__ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
a__ = encoder_input_ids
a__ = decoder_input_ids
a__ = a__ = None
a__ = None
a__ = a__ = None
a__ = a__ = None
a__ = None
# The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
a__ = original(UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ )[0]
a__ = original.generator(UpperCAmelCase__ )
a__ = new_model(
UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ )[0]
a__ = new_model.generator(UpperCAmelCase__ )
a__ = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print('Maximum absolute difference between weights: {:.2f}'.format(UpperCAmelCase__ ) )
a__ = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print('Maximum absolute difference between weights: {:.2f}'.format(UpperCAmelCase__ ) )
a__ = torch.allclose(UpperCAmelCase__,UpperCAmelCase__,atol=1e-3 )
if are_identical:
logging.info('all weights are equal up to 1e-3' )
else:
raise ValueError('the weights are different. The new model is likely different from the original one.' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('saving the model\'s state dictionary' )
torch.save(
new_model.state_dict(),'./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin' )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
__magic_name__ = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self : int ) -> Optional[Any]:
'''simple docstring'''
a__ = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
a__ = AutoTokenizer.from_pretrained('xlm-roberta-base' )
a__ = 'The dog is cute and lives in the garden house'
a__ = jnp.array([tokenizer.encode(_snake_case )] )
a__ = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
a__ = jnp.array(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
a__ = model(_snake_case )['last_hidden_state']
self.assertEqual(output.shape , _snake_case )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , _snake_case , atol=1E-3 ) )
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Any ) -> Union[str, Any]:
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
__lowerCamelCase = flax_key_tuple[:-1] + ('weight',)
__lowerCamelCase = torch.permute(UpperCamelCase__ , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(UpperCamelCase__ ):
# linear layer
__lowerCamelCase = flax_key_tuple[:-1] + ('weight',)
__lowerCamelCase = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__lowerCamelCase = flax_key_tuple[:-1] + ('weight',)
return flax_key_tuple, flax_tensor
def lowerCamelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] ) -> List[str]:
"""simple docstring"""
if "metadata" in layer:
__lowerCamelCase = layer.split('metadata' )
__lowerCamelCase = ''.join(split_layer[0] )[:-1]
__lowerCamelCase = [tuple(('metadata' + split_layer[1]).split('/' ) )]
elif "kvstore" in layer:
__lowerCamelCase = layer.split('kvstore' )
__lowerCamelCase = ''.join(split_layer[0] )[:-1]
__lowerCamelCase = [tuple(('kvstore' + split_layer[1]).split('/' ) )]
else:
__lowerCamelCase = layer.split('/' )
__lowerCamelCase = '/'.join(split_layer[:-1] )
__lowerCamelCase = (split_layer[-1],)
if "kvstore/path" in layer:
__lowerCamelCase = F"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
elif "kvstore/driver" in layer:
__lowerCamelCase = 'file'
else:
__lowerCamelCase = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def lowerCamelCase_ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowerCamelCase = rename_keys(UpperCamelCase__ )
__lowerCamelCase = {}
for k, v in current_block.items():
__lowerCamelCase = v
__lowerCamelCase = new_current_block
torch.save(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str = WEIGHTS_NAME ) -> Dict:
"""simple docstring"""
__lowerCamelCase = convert_file_size_to_int(UpperCamelCase__ )
__lowerCamelCase = []
__lowerCamelCase = {}
__lowerCamelCase = 0
__lowerCamelCase = 0
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
with gfile.GFile(switch_checkpoint_path + '/checkpoint' , 'rb' ) as fp:
__lowerCamelCase = serialization.msgpack_restore(fp.read() )['optimizer']['target']
__lowerCamelCase = flatten_dict(UpperCamelCase__ , sep='/' )
__lowerCamelCase = {}
for layer in checkpoint_info.keys():
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = get_key_and_tensorstore_dict(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if curr_real_layer_name in all_layers:
__lowerCamelCase = content
else:
__lowerCamelCase = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
__lowerCamelCase = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
__lowerCamelCase = torch.tensor(UpperCamelCase__ )
__lowerCamelCase = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
__lowerCamelCase , __lowerCamelCase = rename_base_flax_keys(tuple(key.split('/' ) ) , UpperCamelCase__ )
__lowerCamelCase = '/'.join(UpperCamelCase__ )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
__lowerCamelCase = os.path.join(
UpperCamelCase__ , weights_name.replace('.bin' , F"""-{len(UpperCamelCase__ )+1:05d}-of-???.bin""" ) )
rename_and_save_block(UpperCamelCase__ , UpperCamelCase__ )
sharded_state_dicts.append(current_block.keys() )
del current_block
__lowerCamelCase = {}
__lowerCamelCase = 0
__lowerCamelCase = raw_weights.to(getattr(UpperCamelCase__ , UpperCamelCase__ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
__lowerCamelCase = os.path.join(UpperCamelCase__ , weights_name.replace('.bin' , F"""-{len(UpperCamelCase__ )+1:05d}-of-???.bin""" ) )
rename_and_save_block(UpperCamelCase__ , UpperCamelCase__ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(UpperCamelCase__ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
__lowerCamelCase = {}
__lowerCamelCase = {}
for idx, shard in enumerate(UpperCamelCase__ ):
__lowerCamelCase = weights_name.replace(
'.bin' , F"""-{idx+1:05d}-of-{len(UpperCamelCase__ ):05d}.bin""" ) # len(sharded_state_dicts):05d}
__lowerCamelCase = os.path.join(UpperCamelCase__ , weights_name.replace('.bin' , F"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
__lowerCamelCase = shard
for key in shard:
__lowerCamelCase = shard_file
# Add the metadata
__lowerCamelCase = {'total_size': total_size}
__lowerCamelCase = {'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , 'w' , encoding='utf-8' ) as f:
__lowerCamelCase = json.dumps(UpperCamelCase__ , indent=2 , sort_keys=UpperCamelCase__ ) + '\n'
f.write(UpperCamelCase__ )
return metadata, index
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
__A = parser.parse_args()
shard_on_the_fly(
args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def lowerCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
__lowerCamelCase = SwitchTransformersConfig.from_pretrained('google/switch-base-8' )
config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted' )
__lowerCamelCase = SwitchTransformersForConditionalGeneration.from_pretrained(
'/home/arthur_huggingface_co/transformers/switch_converted' , device_map='auto' )
__lowerCamelCase = T5Tokenizer.from_pretrained('t5-small' )
__lowerCamelCase = 'A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'
__lowerCamelCase = tokenizer(UpperCamelCase__ , return_tensors='pt' ).input_ids
__lowerCamelCase = model.generate(UpperCamelCase__ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
def sylvester(number: int) -> int:
    """
    Calculate the number at position `number` in Sylvester's sequence.
    """
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        raise ValueError(f"The input value of [n={number}] has to be > 0")
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
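
# --- Added sanity check (values computed from the recurrence above) ---
# Each term satisfies s(n) = s(n-1) * (s(n-1) - 1) + 1, giving 2, 3, 7, 43, 1807, ...
assert [sylvester(n) for n in range(1, 6)] == [2, 3, 7, 43, 1807]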
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
SCREAMING_SNAKE_CASE = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
SCREAMING_SNAKE_CASE = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n'
SCREAMING_SNAKE_CASE = r'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def A__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] , )
def A__ ( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any=False ) -> Dict:
'''simple docstring'''
lowercase : List[Any] =spearmanr(UpperCAmelCase , UpperCAmelCase )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = ComputeEnvironment.AMAZON_SAGEMAKER
UpperCamelCase_ = True
UpperCamelCase_ = '''ml.p3.2xlarge'''
UpperCamelCase_ = '''accelerate_sagemaker_execution_role'''
UpperCamelCase_ = '''hf-sm'''
UpperCamelCase_ = '''us-east-1'''
UpperCamelCase_ = 1
UpperCamelCase_ = '''accelerate-sagemaker-1'''
UpperCamelCase_ = '''1.6'''
UpperCamelCase_ = '''4.4'''
UpperCamelCase_ = '''train.py'''
UpperCamelCase_ = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''False''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
UpperCamelCase_ = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''--do_test''',
'''False''',
'''--do_predict''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self : int ) -> Dict:
'''simple docstring'''
lowercase : str =_convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args['''model_name_or_path'''] , UpperCAmelCase )
assert isinstance(converted_args['''do_train'''] , UpperCAmelCase )
assert isinstance(converted_args['''epochs'''] , UpperCAmelCase )
assert isinstance(converted_args['''learning_rate'''] , UpperCAmelCase )
assert isinstance(converted_args['''max_steps'''] , UpperCAmelCase )
with pytest.raises(UpperCAmelCase ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def UpperCAmelCase__ ( ):
__a : Tuple = ArgumentParser(
description=(
'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
) )
# Optional arguments for the launch helper
parser.add_argument('--num_cores' , type=_lowerCamelCase , default=1 , help='Number of TPU cores to use (1 or 8).' )
# positional
parser.add_argument(
'training_script' , type=_lowerCamelCase , help=(
'The full path to the single TPU training '
'program/script to be launched in parallel, '
'followed by all the arguments for the '
'training script'
) , )
# rest from the training program
parser.add_argument('training_script_args' , nargs=_lowerCamelCase )
return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
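# Illustrative invocation (script and argument names are examples only, not part of this file):
#   python xla_spawn.py --num_cores 8 ./run_glue.py --model_name_or_path bert-base-cased --do_train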
| 717
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption( parser ):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary( terminalreporter ):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
| 577
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase : int = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[Any] = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : List[Any] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[int] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
lowercase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 649
|
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , bert_config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file )
    print(f"Building PyTorch model from configuration: {config}" )
    model = BertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowercase : int = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
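# Example command line for this script (paths are placeholders):
#   python <script>.py --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json --pytorch_dump_path ./pytorch_model.bin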
| 649
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class __a ( PretrainedConfig ):
    '''simple docstring'''
    model_type = """unispeech-sat"""
def __init__( self , UpperCamelCase__=32 , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=0.02 , UpperCamelCase__=1E-5 , UpperCamelCase__="group" , UpperCamelCase__="gelu" , UpperCamelCase__=(512, 512, 512, 512, 512, 512, 512) , UpperCamelCase__=(5, 2, 2, 2, 2, 2, 2) , UpperCamelCase__=(10, 3, 3, 3, 3, 2, 2) , UpperCamelCase__=False , UpperCamelCase__=128 , UpperCamelCase__=16 , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=0.05 , UpperCamelCase__=10 , UpperCamelCase__=2 , UpperCamelCase__=0.0 , UpperCamelCase__=10 , UpperCamelCase__=0 , UpperCamelCase__=320 , UpperCamelCase__=2 , UpperCamelCase__=0.1 , UpperCamelCase__=100 , UpperCamelCase__=256 , UpperCamelCase__=256 , UpperCamelCase__=0.1 , UpperCamelCase__="mean" , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=256 , UpperCamelCase__=(512, 512, 512, 512, 1500) , UpperCamelCase__=(5, 3, 3, 1, 1) , UpperCamelCase__=(1, 2, 3, 1, 1) , UpperCamelCase__=512 , UpperCamelCase__=0 , UpperCamelCase__=1 , UpperCamelCase__=2 , UpperCamelCase__=504 , **UpperCamelCase__ , ):
super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : int = hidden_size
SCREAMING_SNAKE_CASE_ : List[Any] = feat_extract_norm
SCREAMING_SNAKE_CASE_ : str = feat_extract_activation
SCREAMING_SNAKE_CASE_ : str = list(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = list(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = conv_bias
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_conv_pos_embeddings
SCREAMING_SNAKE_CASE_ : str = num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE_ : int = len(self.conv_dim )
SCREAMING_SNAKE_CASE_ : int = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Dict = intermediate_size
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE_ : Tuple = hidden_dropout
SCREAMING_SNAKE_CASE_ : Optional[Any] = attention_dropout
SCREAMING_SNAKE_CASE_ : Any = activation_dropout
SCREAMING_SNAKE_CASE_ : Any = feat_proj_dropout
SCREAMING_SNAKE_CASE_ : str = final_dropout
SCREAMING_SNAKE_CASE_ : Optional[int] = layerdrop
SCREAMING_SNAKE_CASE_ : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE_ : str = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[int] = num_clusters
SCREAMING_SNAKE_CASE_ : Tuple = do_stable_layer_norm
SCREAMING_SNAKE_CASE_ : str = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE_ : Dict = apply_spec_augment
SCREAMING_SNAKE_CASE_ : Any = mask_time_prob
SCREAMING_SNAKE_CASE_ : Optional[Any] = mask_time_length
SCREAMING_SNAKE_CASE_ : str = mask_time_min_masks
SCREAMING_SNAKE_CASE_ : int = mask_feature_prob
SCREAMING_SNAKE_CASE_ : int = mask_feature_length
SCREAMING_SNAKE_CASE_ : List[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
SCREAMING_SNAKE_CASE_ : List[str] = num_codevectors_per_group
SCREAMING_SNAKE_CASE_ : Tuple = num_codevector_groups
SCREAMING_SNAKE_CASE_ : List[str] = contrastive_logits_temperature
SCREAMING_SNAKE_CASE_ : Optional[Any] = feat_quantizer_dropout
SCREAMING_SNAKE_CASE_ : Any = num_negatives
SCREAMING_SNAKE_CASE_ : Any = codevector_dim
SCREAMING_SNAKE_CASE_ : Tuple = proj_codevector_dim
SCREAMING_SNAKE_CASE_ : Optional[int] = diversity_loss_weight
# ctc loss
SCREAMING_SNAKE_CASE_ : Optional[int] = ctc_loss_reduction
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE_ : Dict = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = list(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Any = list(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : int = xvector_output_dim
@property
def __snake_case ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
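        # Note: this property multiplies the convolutional strides; with the default
        # conv_stride of (5, 2, 2, 2, 2, 2, 2) shown above, the product is 5 * 2**6 = 320,
        # i.e. one output frame per 320 input samples.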
| 97
|
def one_pence() -> int:
    '''simple docstring'''
    return 1
def two_pence(x: int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def five_pence(x: int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else five_pence(x - 5 ) + two_pence(x )
def ten_pence(x: int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(x )
def twenty_pence(x: int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(x )
def fifty_pence(x: int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(x )
def one_pound(x: int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(x )
def two_pound(x: int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else two_pound(x - 200 ) + one_pound(x )
def solution(x: int = 200 ) -> int:
    '''simple docstring'''
    return two_pound(x )
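# Note: this is Project Euler problem 31; with the default total of 200 pence and coins of
# 1p, 2p, 5p, 10p, 20p, 50p, 100p and 200p, solution() evaluates to 73682.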
if __name__ == "__main__":
print(solution(int(input().strip())))
| 97
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Optional[int] = logging.get_logger(__name__)
A__ : List[Any] = {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/config.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/config.json'''
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class lowercase__ ( PretrainedConfig ):
    model_type = "fnet"
    def __init__( self , vocab_size=3_2000 , hidden_size=768 , num_hidden_layers=12 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=4 , initializer_range=0.02 , layer_norm_eps=1E-12 , use_tpu_fourier_optimizations=False , tpu_short_seq_length=512 , pad_token_id=3 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
| 153
|
'''simple docstring'''
from __future__ import annotations
def ceil_index(v , l , r , key ):  # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def longest_increasing_subsequence_length(v: list[int] ) -> int:
    if len(v ) == 0:
        return 0
    tail = [0] * len(v )
    length = 1
    tail[0] = v[0]
    for i in range(1 , len(v ) ):
        if v[i] < tail[0]:
            # a new smallest value starts a fresh candidate subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces its ceiling element in tail to keep the tail values minimal
            tail[ceil_index(tail , -1 , length - 1 , v[i] )] = v[i]
    return length
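# Illustrative check: longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6
# (one longest increasing subsequence is [2, 3, 7, 8, 10, 13]).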
if __name__ == "__main__":
import doctest
doctest.testmod()
| 588
| 0
|
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
_snake_case = "base_with_context"
def load_notes_encoder( weights , model ):
'''simple docstring'''
_lowerCAmelCase : Any = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
_lowerCAmelCase : Any = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=_lowerCamelCase )
for lyr_num, lyr in enumerate(model.encoders ):
_lowerCAmelCase : Optional[Any] = weights[F"layers_{lyr_num}"]
_lowerCAmelCase : Union[str, Any] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
_lowerCAmelCase : Dict = ly_weight["attention"]
_lowerCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
_lowerCAmelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
_lowerCAmelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
_lowerCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
_lowerCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
_lowerCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
_lowerCAmelCase : int = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
_lowerCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
_lowerCAmelCase : Optional[int] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_continuous_encoder( weights , model ):
'''simple docstring'''
_lowerCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
_lowerCAmelCase : str = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=_lowerCamelCase )
for lyr_num, lyr in enumerate(model.encoders ):
_lowerCAmelCase : List[str] = weights[F"layers_{lyr_num}"]
_lowerCAmelCase : List[Any] = ly_weight["attention"]
_lowerCAmelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
_lowerCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
_lowerCAmelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
_lowerCAmelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
_lowerCAmelCase : Optional[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
_lowerCAmelCase : int = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
_lowerCAmelCase : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
_lowerCAmelCase : Any = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
_lowerCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
_lowerCAmelCase : Optional[int] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_decoder( weights , model ):
'''simple docstring'''
_lowerCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
_lowerCAmelCase : List[Any] = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
_lowerCAmelCase : Optional[Any] = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
_lowerCAmelCase : List[Any] = weights[F"layers_{lyr_num}"]
_lowerCAmelCase : Optional[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
_lowerCAmelCase : Any = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
_lowerCAmelCase : int = ly_weight["self_attention"]
_lowerCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
_lowerCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
_lowerCAmelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
_lowerCAmelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
_lowerCAmelCase : int = ly_weight["MultiHeadDotProductAttention_0"]
_lowerCAmelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
_lowerCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
_lowerCAmelCase : int = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
_lowerCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
_lowerCAmelCase : int = nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
_lowerCAmelCase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
_lowerCAmelCase : List[str] = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
_lowerCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
_lowerCAmelCase : int = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
_lowerCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
_lowerCAmelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
_lowerCAmelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def main( args ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = checkpoints.load_tax_checkpoint(args.checkpoint_path )
_lowerCAmelCase : Optional[int] = jnp.tree_util.tree_map(onp.array , _lowerCamelCase )
_lowerCAmelCase : int = [
"from __gin__ import dynamic_registration",
"from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
"diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
"diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
]
_lowerCAmelCase : Any = os.path.join(args.checkpoint_path , ".." , "config.gin" )
_lowerCAmelCase : Dict = inference.parse_training_gin_file(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = inference.InferenceModel(args.checkpoint_path , _lowerCamelCase )
_lowerCAmelCase : str = DDPMScheduler(beta_schedule="squaredcos_cap_v2" , variance_type="fixed_large" )
_lowerCAmelCase : List[str] = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length["inputs"] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
_lowerCAmelCase : str = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["targets_context"] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
_lowerCAmelCase : Dict = TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["targets_context"] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
_lowerCAmelCase : List[str] = load_notes_encoder(ta_checkpoint["target"]["token_encoder"] , _lowerCamelCase )
_lowerCAmelCase : int = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"] , _lowerCamelCase )
_lowerCAmelCase : List[str] = load_decoder(ta_checkpoint["target"]["decoder"] , _lowerCamelCase )
_lowerCAmelCase : List[str] = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" )
_lowerCAmelCase : int = SpectrogramDiffusionPipeline(
notes_encoder=_lowerCamelCase , continuous_encoder=_lowerCamelCase , decoder=_lowerCamelCase , scheduler=_lowerCamelCase , melgan=_lowerCamelCase , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument(
"--checkpoint_path",
default=f'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="Path to the original jax model checkpoint.",
)
    args = parser.parse_args()
main(args)
| 658
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class UpperCAmelCase_ ( BackboneConfigMixin , PretrainedConfig):
    model_type = 'focalnet'
def __init__( self, __a=224, __a=4, __a=3, __a=96, __a=False, __a=[192, 384, 768, 768], __a=[2, 2, 6, 2], __a=[2, 2, 2, 2], __a=[3, 3, 3, 3], __a="gelu", __a=4.0, __a=0.0, __a=0.1, __a=False, __a=1E-4, __a=False, __a=False, __a=False, __a=0.02, __a=1E-5, __a=32, __a=None, __a=None, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : str = image_size
_lowerCAmelCase : List[str] = patch_size
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : Tuple = embed_dim
_lowerCAmelCase : List[Any] = use_conv_embed
_lowerCAmelCase : Any = hidden_sizes
_lowerCAmelCase : Tuple = depths
_lowerCAmelCase : Dict = focal_levels
_lowerCAmelCase : Optional[Any] = focal_windows
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Union[str, Any] = mlp_ratio
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Dict = drop_path_rate
_lowerCAmelCase : str = use_layerscale
_lowerCAmelCase : str = layerscale_value
_lowerCAmelCase : Union[str, Any] = use_post_layernorm
_lowerCAmelCase : Optional[int] = use_post_layernorm_in_modulation
_lowerCAmelCase : str = normalize_modulator
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : Any = encoder_stride
_lowerCAmelCase : List[str] = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
_lowerCAmelCase , _lowerCAmelCase : List[str] = get_aligned_output_features_output_indices(
out_features=__a, out_indices=__a, stage_names=self.stage_names)
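        # stage_names always contains the stem plus one entry per stage, so a four-stage
        # configuration yields ["stem", "stage1", "stage2", "stage3", "stage4"].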
| 658
| 1
|
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
lowercase_ = 'https://openaipublic.azureedge.net/jukebox/models/'
lowercase_ = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def UpperCAmelCase ( _lowercase : Tuple ) -> Tuple:
"""simple docstring"""
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 1_0:
lowerCAmelCase_ = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 1_0:
lowerCAmelCase_ = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 1_0:
lowerCAmelCase_ = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 1_0:
lowerCAmelCase_ = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
lowerCAmelCase_ = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
lowerCAmelCase_ = key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
lowerCAmelCase_ = key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
lowerCAmelCase_ = key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
def UpperCAmelCase ( _lowercase : Optional[Any] , _lowercase : List[str] , _lowercase : Any , _lowercase : Dict ) -> Any:
"""simple docstring"""
lowerCAmelCase_ = {}
import re
lowerCAmelCase_ = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
lowerCAmelCase_ = re.compile(
r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
lowerCAmelCase_ = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
lowerCAmelCase_ = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
lowerCAmelCase_ = re.compile(
r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
lowerCAmelCase_ = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
lowerCAmelCase_ = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
lowerCAmelCase_ = re.compile(
r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
lowerCAmelCase_ = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(__lowercase ):
lowerCAmelCase_ = re_encoder_block_conv_in.match(__lowercase )
lowerCAmelCase_ = regex_match.groups()
lowerCAmelCase_ = int(groups[2] ) * 2 + int(groups[3] )
lowerCAmelCase_ = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
lowerCAmelCase_ = re_encoder_block_conv_in.sub(__lowercase , __lowercase )
elif re_encoder_block_resnet.fullmatch(__lowercase ):
lowerCAmelCase_ = re_encoder_block_resnet.match(__lowercase )
lowerCAmelCase_ = regex_match.groups()
lowerCAmelCase_ = int(groups[2] ) * 2 + int(groups[3] )
lowerCAmelCase_ = {'1': 1, '3': 2}[groups[-2]]
lowerCAmelCase_ = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
lowerCAmelCase_ = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
lowerCAmelCase_ = prefix + resnet_block
lowerCAmelCase_ = re_encoder_block_resnet.sub(__lowercase , __lowercase )
elif re_encoder_block_proj_out.fullmatch(__lowercase ):
lowerCAmelCase_ = re_encoder_block_proj_out.match(__lowercase )
lowerCAmelCase_ = regex_match.groups()
lowerCAmelCase_ = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
lowerCAmelCase_ = re_encoder_block_proj_out.sub(__lowercase , __lowercase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(__lowercase ):
lowerCAmelCase_ = re_decoder_block_conv_out.match(__lowercase )
lowerCAmelCase_ = regex_match.groups()
lowerCAmelCase_ = int(groups[2] ) * 2 + int(groups[3] ) - 2
lowerCAmelCase_ = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
lowerCAmelCase_ = re_decoder_block_conv_out.sub(__lowercase , __lowercase )
elif re_decoder_block_resnet.fullmatch(__lowercase ):
lowerCAmelCase_ = re_decoder_block_resnet.match(__lowercase )
lowerCAmelCase_ = regex_match.groups()
lowerCAmelCase_ = int(groups[2] ) * 2 + int(groups[3] ) - 2
lowerCAmelCase_ = {'1': 1, '3': 2}[groups[-2]]
lowerCAmelCase_ = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
lowerCAmelCase_ = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
lowerCAmelCase_ = prefix + resnet_block
lowerCAmelCase_ = re_decoder_block_resnet.sub(__lowercase , __lowercase )
elif re_decoder_block_proj_in.fullmatch(__lowercase ):
lowerCAmelCase_ = re_decoder_block_proj_in.match(__lowercase )
lowerCAmelCase_ = regex_match.groups()
lowerCAmelCase_ = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
lowerCAmelCase_ = re_decoder_block_proj_in.sub(__lowercase , __lowercase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(__lowercase ):
lowerCAmelCase_ = re_prior_cond_conv_out.match(__lowercase )
lowerCAmelCase_ = regex_match.groups()
lowerCAmelCase_ = int(groups[1] ) * 2 + int(groups[2] ) - 2
lowerCAmelCase_ = F"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
lowerCAmelCase_ = re_prior_cond_conv_out.sub(__lowercase , __lowercase )
elif re_prior_cond_resnet.fullmatch(__lowercase ):
lowerCAmelCase_ = re_prior_cond_resnet.match(__lowercase )
lowerCAmelCase_ = regex_match.groups()
lowerCAmelCase_ = int(groups[1] ) * 2 + int(groups[2] ) - 2
lowerCAmelCase_ = {'1': 1, '3': 2}[groups[-2]]
lowerCAmelCase_ = F"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
lowerCAmelCase_ = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
lowerCAmelCase_ = prefix + resnet_block
lowerCAmelCase_ = re_prior_cond_resnet.sub(__lowercase , __lowercase )
elif re_prior_cond_proj_in.fullmatch(__lowercase ):
lowerCAmelCase_ = re_prior_cond_proj_in.match(__lowercase )
lowerCAmelCase_ = regex_match.groups()
lowerCAmelCase_ = F"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
lowerCAmelCase_ = re_prior_cond_proj_in.sub(__lowercase , __lowercase )
# keep original key
else:
lowerCAmelCase_ = original_key
lowerCAmelCase_ = replace_key(__lowercase )
if F"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(F"""failed converting {original_key} to {key}, does not match""" )
# handle missmatched shape
elif value.shape != model_state_dict[F"""{key_prefix}.{key}"""].shape:
lowerCAmelCase_ = model_state_dict[F"""{key_prefix}.{key}"""]
print(F"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
lowerCAmelCase_ = original_key
lowerCAmelCase_ = original_key
lowerCAmelCase_ = value
return new_dict
@torch.no_grad()
def UpperCAmelCase ( _lowercase : Optional[Any]=None , _lowercase : List[Any]=None ) -> Dict:
"""simple docstring"""
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ):
lowerCAmelCase_ = requests.get(F"""{PREFIX}{file}""" , allow_redirects=__lowercase )
os.makedirs(F"""{pytorch_dump_folder_path}/""" , exist_ok=__lowercase )
open(F"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" , '''wb''' ).write(r.content )
lowerCAmelCase_ = MODEL_MAPPING[model_name.split('''/''' )[-1]]
lowerCAmelCase_ = JukeboxConfig.from_pretrained(__lowercase )
lowerCAmelCase_ = JukeboxModel(__lowercase )
lowerCAmelCase_ = []
lowerCAmelCase_ = {}
for i, dict_name in enumerate(__lowercase ):
lowerCAmelCase_ = torch.load(F"""{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}""" )['model']
lowerCAmelCase_ = {}
for k in old_dic.keys():
if k.endswith('''.b''' ):
lowerCAmelCase_ = old_dic[k]
elif k.endswith('''.w''' ):
lowerCAmelCase_ = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
lowerCAmelCase_ = old_dic[k]
else:
lowerCAmelCase_ = old_dic[k]
lowerCAmelCase_ = 'vqvae' if i == 0 else F"""priors.{3 - i}"""
lowerCAmelCase_ = fix_jukebox_keys(__lowercase , model.state_dict() , __lowercase , __lowercase )
weight_dict.append(__lowercase )
lowerCAmelCase_ = weight_dict.pop(0 )
model.vqvae.load_state_dict(__lowercase )
for i in range(len(__lowercase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
with open(F"""{pytorch_dump_folder_path}/mapping.json""" , '''w''' ) as txtfile:
json.dump(__lowercase , __lowercase )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowercase )
return weight_dict
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
lowercase_ = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 552
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""",
}
class UpperCAmelCase ( BackboneConfigMixin , PretrainedConfig ):
    '''simple docstring'''
    model_type = '''bit'''
    layer_types = ['''preactivation''', '''bottleneck''']
    supported_padding = ['''SAME''', '''VALID''']
def __init__( self , lowercase=3 , lowercase=6_4 , lowercase=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , lowercase=[3, 4, 6, 3] , lowercase="preactivation" , lowercase="relu" , lowercase=None , lowercase=3_2 , lowercase=0.0 , lowercase=False , lowercase=3_2 , lowercase=1 , lowercase=None , lowercase=None , **lowercase , ):
"""simple docstring"""
super().__init__(**lowercase )
if layer_type not in self.layer_types:
raise ValueError(F'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
A_ : Tuple = global_padding.upper()
else:
raise ValueError(F'''Padding strategy {global_padding} not supported''' )
A_ : Any = num_channels
A_ : Any = embedding_size
A_ : List[Any] = hidden_sizes
A_ : int = depths
A_ : Union[str, Any] = layer_type
A_ : List[Any] = hidden_act
A_ : Tuple = global_padding
A_ : List[str] = num_groups
A_ : int = drop_path_rate
A_ : str = embedding_dynamic_padding
A_ : Dict = output_stride
A_ : Any = width_factor
A_ : int = ['stem'] + [F'''stage{idx}''' for idx in range(1 , len(lowercase ) + 1 )]
A_ , A_ : List[Any] = get_aligned_output_features_output_indices(
out_features=lowercase , out_indices=lowercase , stage_names=self.stage_names )
| 558
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "facebook/bart-large-mnli"
a__ = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
a__ = "text_classifier"
a__ = AutoTokenizer
a__ = AutoModelForSequenceClassification
a__ = ["text", ["text"]]
a__ = ["text"]
def A__ ( self):
super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.idalabel.items():
            if label.lower().startswith('entail'):
                self.entailment_id = int(idx)
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')
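    # setup() scans the model's label map for the label that marks entailment, so the tool can
    # frame classification as NLI: each candidate label becomes the hypothesis "This example is {label}".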
def A__ ( self , __snake_case , __snake_case):
        self._labels = labels
return self.pre_processor(
[text] * len(__snake_case) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )
def A__ ( self , __snake_case):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
return self._labels[label_id]
| 719
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "facebook/bart-large-mnli"
a__ = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
a__ = "text_classifier"
a__ = AutoTokenizer
a__ = AutoModelForSequenceClassification
a__ = ["text", ["text"]]
a__ = ["text"]
def A__ ( self):
super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.idalabel.items():
            if label.lower().startswith('entail'):
                self.entailment_id = int(idx)
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')
def A__ ( self , __snake_case , __snake_case):
        self._labels = labels
return self.pre_processor(
[text] * len(__snake_case) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )
def A__ ( self , __snake_case):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
return self._labels[label_id]
| 648
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : str = AltDiffusionPipeline
_UpperCAmelCase : Tuple = TEXT_TO_IMAGE_PARAMS
_UpperCAmelCase : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCAmelCase : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase__ ( self ):
torch.manual_seed(0 )
lowerCamelCase : List[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
lowerCamelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__magic_name__ , set_alpha_to_one=__magic_name__ , )
torch.manual_seed(0 )
lowerCamelCase : List[str] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
lowerCamelCase : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_2 , )
lowerCamelCase : List[Any] = CLIPTextModel(__magic_name__ )
lowerCamelCase : List[Any] = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
lowerCamelCase : Optional[int] = 7_7
lowerCamelCase : Dict = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__=0 ):
if str(__magic_name__ ).startswith("""mps""" ):
lowerCamelCase : Union[str, Any] = torch.manual_seed(__magic_name__ )
else:
lowerCamelCase : Dict = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
lowerCamelCase : Tuple = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase__ ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def UpperCamelCase__ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def UpperCamelCase__ ( self ):
lowerCamelCase : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : Any = self.get_dummy_components()
torch.manual_seed(0 )
lowerCamelCase : Dict = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCamelCase : Any = RobertaSeriesModelWithTransformation(__magic_name__ )
lowerCamelCase : Dict = text_encoder
lowerCamelCase : Optional[int] = AltDiffusionPipeline(**__magic_name__ )
lowerCamelCase : Tuple = alt_pipe.to(__magic_name__ )
alt_pipe.set_progress_bar_config(disable=__magic_name__ )
lowerCamelCase : Optional[int] = self.get_dummy_inputs(__magic_name__ )
lowerCamelCase : Tuple = """A photo of an astronaut"""
lowerCamelCase : List[Any] = alt_pipe(**__magic_name__ )
lowerCamelCase : Optional[int] = output.images
lowerCamelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase : List[str] = np.array(
[0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : List[Any] = self.get_dummy_components()
lowerCamelCase : str = PNDMScheduler(skip_prk_steps=__magic_name__ )
torch.manual_seed(0 )
lowerCamelCase : Optional[Any] = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCamelCase : List[str] = RobertaSeriesModelWithTransformation(__magic_name__ )
lowerCamelCase : str = text_encoder
lowerCamelCase : Union[str, Any] = AltDiffusionPipeline(**__magic_name__ )
lowerCamelCase : Union[str, Any] = alt_pipe.to(__magic_name__ )
alt_pipe.set_progress_bar_config(disable=__magic_name__ )
lowerCamelCase : List[str] = self.get_dummy_inputs(__magic_name__ )
lowerCamelCase : List[Any] = alt_pipe(**__magic_name__ )
lowerCamelCase : List[str] = output.images
lowerCamelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase : Tuple = np.array(
[0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
# make sure here that pndm scheduler skips prk
lowerCamelCase : Any = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , safety_checker=__magic_name__ )
lowerCamelCase : int = alt_pipe.to(__magic_name__ )
alt_pipe.set_progress_bar_config(disable=__magic_name__ )
lowerCamelCase : Dict = """A painting of a squirrel eating a burger"""
lowerCamelCase : Any = torch.manual_seed(0 )
lowerCamelCase : List[Any] = alt_pipe([prompt] , generator=__magic_name__ , guidance_scale=6.0 , num_inference_steps=2_0 , output_type="""np""" )
lowerCamelCase : Union[str, Any] = output.images
lowerCamelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase : List[str] = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = DDIMScheduler.from_pretrained("""BAAI/AltDiffusion""" , subfolder="""scheduler""" )
lowerCamelCase : Tuple = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , scheduler=__magic_name__ , safety_checker=__magic_name__ )
lowerCamelCase : Optional[Any] = alt_pipe.to(__magic_name__ )
alt_pipe.set_progress_bar_config(disable=__magic_name__ )
lowerCamelCase : Union[str, Any] = """A painting of a squirrel eating a burger"""
lowerCamelCase : int = torch.manual_seed(0 )
lowerCamelCase : Optional[int] = alt_pipe([prompt] , generator=__magic_name__ , num_inference_steps=2 , output_type="""numpy""" )
lowerCamelCase : List[Any] = output.images
lowerCamelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase : Optional[int] = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 681
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_lowerCamelCase ={
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =[
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
_lowerCamelCase =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 681
| 1
|
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_A = get_tests_dir("fixtures")
class _lowerCAmelCase ( unittest.TestCase ):
def __a ( self ) -> str:
# A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
# Download this model to make sure it's in the cache.
lowerCAmelCase_ = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=_UpperCamelCase ) as mock_head:
lowerCAmelCase_ = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# This check we did call the fake head request
mock_head.assert_called()
def __a ( self ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
lowerCAmelCase_ = WavaVecaFeatureExtractor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
@classmethod
def __a ( cls ) -> int:
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
def __a ( cls ) -> int:
try:
delete_repo(token=cls._token , repo_id="test-feature-extractor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" )
except HTTPError:
pass
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = WavaVecaFeatureExtractor.from_pretrained(_UpperCamelCase )
feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token )
lowerCAmelCase_ = WavaVecaFeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_UpperCamelCase , repo_id="test-feature-extractor" , push_to_hub=_UpperCamelCase , use_auth_token=self._token )
lowerCAmelCase_ = WavaVecaFeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = WavaVecaFeatureExtractor.from_pretrained(_UpperCamelCase )
feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token )
lowerCAmelCase_ = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_UpperCamelCase , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=_UpperCamelCase , use_auth_token=self._token )
lowerCAmelCase_ = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
def __a ( self ) -> Tuple:
CustomFeatureExtractor.register_for_auto_class()
lowerCAmelCase_ = CustomFeatureExtractor.from_pretrained(_UpperCamelCase )
feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , )
lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained(
f"""{USER}/test-dynamic-feature-extractor""" , trust_remote_code=_UpperCamelCase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
| 721
|
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    """Wraps a SpeechT5 feature extractor and a SpeechT5 tokenizer into a single processor."""

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        # Route the main input to the feature extractor (audio) or the tokenizer (text).
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        # Route the target input and keep its values/ids around as labels.
        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # Temporarily switch the feature extractor to mel-spectrogram mode to pad spectrogram labels.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
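# A minimal usage sketch (added for illustration, not part of the original module).
# It assumes the installed `transformers` package exposes this processor and that the
# public "microsoft/speecht5_tts" checkpoint is reachable; the input text is arbitrary.
if __name__ == "__main__":
    from transformers import SpeechT5Processor as _SpeechT5Processor

    _processor = _SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
    _inputs = _processor(text="Hello, world!", return_tensors="pt")
    print(_inputs["input_ids"].shape)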
| 279
| 0
|
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
snake_case__ : Dict = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    """Training arguments for sequence-to-sequence models, extending `TrainingArguments` with generation options."""

    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        # Serialize nested GenerationConfig objects so the result stays JSON-friendly.
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
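# A minimal usage sketch (added for illustration; the output directory and the
# generation settings below are arbitrary values, not defaults taken from this file):
if __name__ == "__main__":
    from transformers import Seq2SeqTrainingArguments as _Seq2SeqArgs

    _args = _Seq2SeqArgs(
        output_dir="./seq2seq-out",
        predict_with_generate=True,
        generation_max_length=128,
        generation_num_beams=4,
    )
    print(_args.to_dict()["generation_max_length"])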
| 392
|
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """Greedy fractional knapsack: values `vl`, weights `wt`, capacity `w`, number of items `n`."""
    # Sort items by value-to-weight ratio, best ratio first.
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
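# A small usage sketch (added for illustration; the values, weights and capacity are
# arbitrary sample numbers): the greedy optimum for this classic instance is 240.0.
if __name__ == "__main__":
    values = [60, 100, 120]
    weights = [10, 20, 30]
    capacity = 50
    print(frac_knapsack(values, weights, capacity, len(values)))  # 240.0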
| 392
| 1
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
_UpperCamelCase : int = logging.get_logger(__name__)
_UpperCamelCase : int = """Hello, World!"""
_UpperCamelCase : Optional[int] = """en_XX"""
def __UpperCamelCase ( snake_case , snake_case , snake_case ) -> Union[str, Any]:
'''simple docstring'''
__A = Path('''data_bin''' )
__A = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(snake_case ).parent ) , checkpoint_file=Path(snake_case ).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(snake_case ) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(snake_case ).parent / '''sentencepiece.bpe.model''' ) , src_dict=str(data_dir / '''dict.txt''' ) , )
xmod.eval() # disable dropout
print(snake_case )
__A = xmod.model.encoder.sentence_encoder
__A = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
__A = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''' , snake_case )
__A = XmodForSequenceClassification(snake_case ) if classification_head else XmodForMaskedLM(snake_case )
model.eval()
# Now let's copy all the weights.
# Embeddings
__A = xmod_sent_encoder.embed_tokens.weight
__A = xmod_sent_encoder.embed_positions.weight
__A = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
__A = xmod_sent_encoder.layernorm_embedding.weight
__A = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__A = model.roberta.encoder.layer[i]
__A = xmod_sent_encoder.layers[i]
# self attention
__A = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('''Dimensions of self-attention weights do not match.''' )
__A = xmod_layer.self_attn.q_proj.weight
__A = xmod_layer.self_attn.q_proj.bias
__A = xmod_layer.self_attn.k_proj.weight
__A = xmod_layer.self_attn.k_proj.bias
__A = xmod_layer.self_attn.v_proj.weight
__A = xmod_layer.self_attn.v_proj.bias
# self-attention output
__A = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''' )
__A = xmod_layer.self_attn.out_proj.weight
__A = xmod_layer.self_attn.out_proj.bias
__A = xmod_layer.self_attn_layer_norm.weight
__A = xmod_layer.self_attn_layer_norm.bias
# intermediate
__A = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''' )
__A = xmod_layer.fca.weight
__A = xmod_layer.fca.bias
# output
__A = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
__A = xmod_layer.fca.weight
__A = xmod_layer.fca.bias
__A = xmod_layer.final_layer_norm.weight
__A = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
__A = xmod_layer.adapter_layer_norm.weight
__A = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('''Lists of language adapters do not match.''' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
__A = bert_output.adapter_modules[lang_code]
__A = xmod_layer.adapter_modules[lang_code]
__A = from_adapter.fca.weight
__A = from_adapter.fca.bias
__A = from_adapter.fca.weight
__A = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
__A = xmod_sent_encoder.layer_norm.weight
__A = xmod_sent_encoder.layer_norm.bias
if classification_head:
__A = xmod.model.classification_heads['''mnli'''].dense.weight
__A = xmod.model.classification_heads['''mnli'''].dense.bias
__A = xmod.model.classification_heads['''mnli'''].out_proj.weight
__A = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
__A = xmod.model.encoder.lm_head.dense.weight
__A = xmod.model.encoder.lm_head.dense.bias
__A = xmod.model.encoder.lm_head.layer_norm.weight
__A = xmod.model.encoder.lm_head.layer_norm.bias
__A = xmod.model.encoder.lm_head.weight
__A = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
__A = xmod.encode(snake_case ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(snake_case )
__A = model(snake_case )[0]
if classification_head:
__A = xmod.model.classification_heads['''mnli'''](xmod.extract_features(snake_case ) )
else:
__A = xmod.model(snake_case , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
__A = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7
__A = torch.allclose(snake_case , snake_case , atol=1E-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
Path(snake_case ).mkdir(parents=snake_case , exist_ok=snake_case )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case )
if __name__ == "__main__":
_UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
_UpperCamelCase : int = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
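# Example invocation (added for illustration; "convert_xmod_checkpoint.py" stands in for
# whatever this script file is actually called, and the checkpoint path is a placeholder):
#
#   python convert_xmod_checkpoint.py \
#       --xmod_checkpoint_path /path/to/fairseq/xmod_base/model.pt \
#       --pytorch_dump_folder_path ./xmod-base-converted
#
# Pass --classification_head as well to also convert the MNLI classification head.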
| 341
|
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __UpperCamelCase ( snake_case ) -> Dict:
'''simple docstring'''
__A = []
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
F"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
F"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
F"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
F"stage{idx}.patch_embed.norm.bias",
) )
return embed
def __UpperCamelCase ( snake_case , snake_case ) -> List[str]:
'''simple docstring'''
__A = []
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
F"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
F"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def __UpperCamelCase ( snake_case ) -> Any:
'''simple docstring'''
__A = []
token.append((F"cvt.encoder.stages.{idx}.cls_token", '''stage2.cls_token''') )
return token
def __UpperCamelCase ( ) -> Tuple:
'''simple docstring'''
__A = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def __UpperCamelCase ( snake_case , snake_case , snake_case , snake_case ) -> List[Any]:
'''simple docstring'''
__A = '''imagenet-1k-id2label.json'''
__A = 1_0_0_0
__A = '''huggingface/label-files'''
__A = num_labels
__A = json.load(open(cached_download(hf_hub_url(snake_case , snake_case , repo_type='''dataset''' ) ) , '''r''' ) )
__A = {int(snake_case ): v for k, v in idalabel.items()}
__A = idalabel
__A = {v: k for k, v in idalabel.items()}
__A = __A = CvtConfig(num_labels=snake_case , idalabel=snake_case , labelaid=snake_case )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
__A = [1, 2, 1_0]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
__A = [1, 4, 1_6]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
__A = [2, 2, 2_0]
__A = [3, 1_2, 1_6]
__A = [1_9_2, 7_6_8, 1_0_2_4]
__A = CvtForImageClassification(snake_case )
__A = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
__A = image_size
__A = torch.load(snake_case , map_location=torch.device('''cpu''' ) )
__A = OrderedDict()
__A = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
__A = list_of_state_dict + cls_token(snake_case )
__A = list_of_state_dict + embeddings(snake_case )
for cnt in range(config.depth[idx] ):
__A = list_of_state_dict + attention(snake_case , snake_case )
__A = list_of_state_dict + final()
for gg in list_of_state_dict:
print(snake_case )
for i in range(len(snake_case ) ):
__A = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(snake_case )
model.save_pretrained(snake_case )
image_processor.save_pretrained(snake_case )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_UpperCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=3_8_4,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCamelCase : str = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 341
| 1
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class lowercase_ :
'''simple docstring'''
def __init__( self : Dict , _UpperCAmelCase : int = 6 ):
_A = None
_A = None
self.create_linked_list(_UpperCAmelCase )
def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : int ):
_A = Node()
_A = current_node
_A = current_node
_A = current_node
for _ in range(1 , _UpperCAmelCase ):
_A = Node()
_A = current_node
_A = previous_node
_A = current_node
_A = self.front
_A = previous_node
def lowerCAmelCase_ ( self : int ):
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def lowerCAmelCase_ ( self : List[str] ):
self.check_can_perform_operation()
return self.front.data if self.front else None
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : Any ):
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
_A = self.rear.next
if self.rear:
_A = data
def lowerCAmelCase_ ( self : List[Any] ):
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
_A = self.front.data
_A = None
return data
_A = self.front
_A = old_front.next
_A = old_front.data
_A = None
return data
def lowerCAmelCase_ ( self : Tuple ):
if self.is_empty():
raise Exception('Empty Queue' )
def lowerCAmelCase_ ( self : Any ):
if self.rear and self.rear.next == self.front:
raise Exception('Full Queue' )
class lowercase_ :
'''simple docstring'''
def __init__( self : Union[str, Any] ):
_A = None
_A = None
_A = None
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7
|
def binary_xor(a: int, b: int) -> str:
    """Return the bitwise XOR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
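# A small usage sketch (added for illustration; the operands are arbitrary):
if __name__ == "__main__":
    # 25 = 0b011001 and 32 = 0b100000, so the XOR is 0b111001.
    print(binary_xor(25, 32))  # 0b111001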
| 478
| 0
|
'''simple docstring'''
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort the list in place by repeatedly exchanging out-of-order pairs, then return it."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
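# A deterministic usage sketch (added for illustration; the list is arbitrary):
if __name__ == "__main__":
    print(exchange_sort([5, 4, 3, 2, 1]))  # [1, 2, 3, 4, 5]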
| 483
|
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def __lowercase (_lowercase, _lowercase, _lowercase ) -> Optional[Any]:
"""simple docstring"""
# Initialise PyTorch model
__lowerCamelCase : str = RemBertConfig.from_json_file(_lowercase )
print("""Building PyTorch model from configuration: {}""".format(str(_lowercase ) ) )
__lowerCamelCase : List[Any] = RemBertModel(_lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(_lowercase, _lowercase, _lowercase )
# Save pytorch-model
print("""Save PyTorch model to {}""".format(_lowercase ) )
torch.save(model.state_dict(), _lowercase )
if __name__ == "__main__":
UpperCAmelCase__ :Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--rembert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained RemBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase__ :List[Any] = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 483
| 1
|
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """A string can be rearranged into a palindrome iff at most one character has an odd count."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    """Time both implementations on the module-level `check_str`."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
| 175
|
import numpy
# List of input, output pairs
a_ = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
a_ = (((515, 22, 13), 555), ((61, 35, 49), 150))
a_ = [2, 4, 1, 5]
a_ = len(train_data)
a_ = 0.0_09
def a__ ( _UpperCamelCase : str ,_UpperCamelCase : List[Any]="train" ):
return calculate_hypothesis_value(_UpperCamelCase ,_UpperCamelCase ) - output(
_UpperCamelCase ,_UpperCamelCase )
def a__ ( _UpperCamelCase : Optional[Any] ):
__lowerCamelCase = 0
for i in range(len(_UpperCamelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def a__ ( _UpperCamelCase : Tuple ,_UpperCamelCase : List[Any] ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def a__ ( _UpperCamelCase : List[Any] ,_UpperCamelCase : Union[str, Any] ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def a__ ( _UpperCamelCase : Dict ,_UpperCamelCase : Dict=m ):
__lowerCamelCase = 0
for i in range(_UpperCamelCase ):
if index == -1:
summation_value += _error(_UpperCamelCase )
else:
summation_value += _error(_UpperCamelCase ) * train_data[i][0][index]
return summation_value
def a__ ( _UpperCamelCase : Optional[int] ):
__lowerCamelCase = summation_of_cost_derivative(_UpperCamelCase ,_UpperCamelCase ) / m
return cost_derivative_value
def a__ ( ):
global parameter_vector
# Tune these values to set a tolerance value for predicted output
__lowerCamelCase = 0.000_002
__lowerCamelCase = 0
__lowerCamelCase = 0
while True:
j += 1
__lowerCamelCase = [0, 0, 0, 0]
for i in range(0 ,len(_UpperCamelCase ) ):
__lowerCamelCase = get_cost_derivative(i - 1 )
__lowerCamelCase = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
_UpperCamelCase ,_UpperCamelCase ,atol=_UpperCamelCase ,rtol=_UpperCamelCase ,):
break
__lowerCamelCase = temp_parameter_vector
print(('''Number of iterations:''', j) )
def a__ ( ):
for i in range(len(_UpperCamelCase ) ):
print(('''Actual output value:''', output(_UpperCamelCase ,'''test''' )) )
print(('''Hypothesis output:''', calculate_hypothesis_value(_UpperCamelCase ,'''test''' )) )
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
| 175
| 1
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class a :
def __init__( self : Optional[int] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Any=13 , lowerCamelCase_ : Optional[int]=30 , lowerCamelCase_ : Dict=2 , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Dict=True , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : List[Any]=32 , lowerCamelCase_ : Any=2 , lowerCamelCase_ : Optional[Any]=4 , lowerCamelCase_ : Tuple=37 , lowerCamelCase_ : List[str]="gelu" , lowerCamelCase_ : str=0.1 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : Optional[int]=10 , lowerCamelCase_ : List[Any]=0.02 , lowerCamelCase_ : int=3 , lowerCamelCase_ : int=None , lowerCamelCase_ : List[str]=2 , ) -> List[Any]:
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = is_training
__a = use_labels
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = type_sequence_label_size
__a = initializer_range
__a = scope
__a = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
__a = (image_size // patch_size) ** 2
__a = num_patches + 2
def lowerCAmelCase_ ( self : Dict ) -> List[str]:
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self : Optional[int] ) -> int:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowerCAmelCase_ ( self : int , lowerCamelCase_ : List[str] , lowerCamelCase_ : str , lowerCamelCase_ : str ) -> Optional[Any]:
__a = TFDeiTModel(config=lowerCamelCase_ )
__a = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] ) -> Any:
__a = TFDeiTForMaskedImageModeling(config=lowerCamelCase_ )
__a = model(lowerCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__a = 1
__a = TFDeiTForMaskedImageModeling(lowerCamelCase_ )
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a = model(lowerCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowerCAmelCase_ ( self : List[str] , lowerCamelCase_ : str , lowerCamelCase_ : Any , lowerCamelCase_ : str ) -> Dict:
__a = self.type_sequence_label_size
__a = TFDeiTForImageClassification(lowerCamelCase_ )
__a = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__a = 1
__a = TFDeiTForImageClassification(lowerCamelCase_ )
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase_ ( self : List[str] ) -> List[str]:
__a = self.prepare_config_and_inputs()
__a , __a , __a = config_and_inputs
__a = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class a ( A_ , A_ , unittest.TestCase ):
A_ : str = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
A_ : Tuple = (
{
"""feature-extraction""": TFDeiTModel,
"""image-classification""": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
A_ : Union[str, Any] = False
A_ : List[str] = False
A_ : List[str] = False
A_ : List[Any] = False
def lowerCAmelCase_ ( self : int ) -> Any:
__a = TFDeiTModelTester(self )
__a = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 )
def lowerCAmelCase_ ( self : str ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def lowerCAmelCase_ ( self : Tuple ) -> Dict:
pass
def lowerCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , tf.keras.layers.Dense ) )
def lowerCAmelCase_ ( self : int ) -> str:
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(lowerCamelCase_ )
__a = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def lowerCAmelCase_ ( self : Dict ) -> List[str]:
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def lowerCAmelCase_ ( self : Any ) -> Optional[Any]:
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def lowerCAmelCase_ ( self : Optional[int] ) -> int:
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
def lowerCAmelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple=False ) -> List[str]:
__a = super()._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def lowerCAmelCase_ ( self : List[str] ) -> List[Any]:
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = TFDeiTModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def UpperCamelCase ( ):
__a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class a ( unittest.TestCase ):
@cached_property
def lowerCAmelCase_ ( self : Union[str, Any] ) -> str:
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase_ ( self : str ) -> Dict:
__a = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=lowerCamelCase_ , return_tensors="""tf""" )
# forward pass
__a = model(**lowerCamelCase_ )
# verify the logits
__a = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
__a = tf.constant([-1.02_66, 0.19_12, -1.28_61] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) )
| 716
|
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class a ( A_ , A_ , A_ ):
@register_to_config
def __init__( self : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : float , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : str , lowerCamelCase_ : bool = False , ) -> List[str]:
super().__init__()
__a = nn.Embedding(lowerCamelCase_ , lowerCamelCase_ )
__a = nn.Embedding(lowerCamelCase_ , lowerCamelCase_ )
__a = False
__a = nn.Dropout(p=lowerCamelCase_ )
__a = TaConfig(
vocab_size=lowerCamelCase_ , d_model=lowerCamelCase_ , num_heads=lowerCamelCase_ , d_kv=lowerCamelCase_ , d_ff=lowerCamelCase_ , dropout_rate=lowerCamelCase_ , feed_forward_proj=lowerCamelCase_ , is_decoder=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , )
__a = nn.ModuleList()
for lyr_num in range(lowerCamelCase_ ):
__a = TaBlock(lowerCamelCase_ )
self.encoders.append(lowerCamelCase_ )
__a = TaLayerNorm(lowerCamelCase_ )
__a = nn.Dropout(p=lowerCamelCase_ )
def lowerCAmelCase_ ( self : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : int ) -> Tuple:
__a = self.token_embedder(lowerCamelCase_ )
__a = encoder_input_tokens.shape[1]
__a = torch.arange(lowerCamelCase_ , device=encoder_input_tokens.device )
x += self.position_encoding(lowerCamelCase_ )
__a = self.dropout_pre(lowerCamelCase_ )
# inverted the attention mask
__a = encoder_input_tokens.size()
__a = self.get_extended_attention_mask(lowerCamelCase_ , lowerCamelCase_ )
for lyr in self.encoders:
__a = lyr(lowerCamelCase_ , lowerCamelCase_ )[0]
__a = self.layer_norm(lowerCamelCase_ )
return self.dropout_post(lowerCamelCase_ ), encoder_inputs_mask
| 173
| 0
|
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int) -> None:
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
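# A deterministic usage sketch (added for illustration; the sample list is arbitrary):
if __name__ == "__main__":
    sample = [3, 1, 4, 1, 5, 9, 2]
    rec_insertion_sort(sample, len(sample))
    print(sample)  # [1, 1, 2, 3, 4, 5, 9]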
| 377
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowercase ( __snake_case ):
UpperCamelCase = ['''image_processor''', '''tokenizer''']
UpperCamelCase = '''BridgeTowerImageProcessor'''
UpperCamelCase = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self : int , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(__lowerCamelCase , __lowerCamelCase )
def __call__( self : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCamelCase : bool = True , __lowerCamelCase : Union[bool, str, PaddingStrategy] = False , __lowerCamelCase : Union[bool, str, TruncationStrategy] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : int = 0 , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Union[str, TensorType]] = None , **__lowerCamelCase : Dict , ) -> BatchEncoding:
"""simple docstring"""
UpperCAmelCase = self.tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
# add pixel_values + pixel_mask
UpperCAmelCase = self.image_processor(
__lowerCamelCase , return_tensors=__lowerCamelCase , do_normalize=__lowerCamelCase , do_center_crop=__lowerCamelCase , **__lowerCamelCase )
encoding.update(__lowerCamelCase )
return encoding
def _lowercase ( self : Optional[Any] , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : List[str] ) -> str:
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def _lowercase ( self : Dict , *__lowerCamelCase : Any , **__lowerCamelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
def _lowercase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.tokenizer.model_input_names
UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 377
| 1
|
"""simple docstring"""
from math import pi
def arc_length(angle: float, radius: float) -> float:
    # Arc length of a circular sector: the angle's fraction of the full circumference.
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
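# A second worked example (added for illustration; the inputs are arbitrary):
# a 45-degree arc of a circle with radius 5 covers one eighth of the circumference,
# i.e. 2 * pi * 5 / 8 = 1.25 * pi, which is about 3.93.
if __name__ == "__main__":
    print(round(arc_length(45, 5), 2))  # 3.93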
| 715
|
"""simple docstring"""
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__(self , lowerCAmelCase__ ):
'''simple docstring'''
_UpperCamelCase : List[str] = size
_UpperCamelCase : Optional[int] = [0] * size
_UpperCamelCase : List[str] = [0] * size
@staticmethod
def lowercase_ (lowerCAmelCase__ ):
'''simple docstring'''
return index | (index + 1)
@staticmethod
def lowercase_ (lowerCAmelCase__ ):
'''simple docstring'''
return (index & (index + 1)) - 1
def lowercase_ (self , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
_UpperCamelCase : Dict = value
while index < self.size:
_UpperCamelCase : Any = self.get_prev(lowerCAmelCase__ ) + 1
if current_left_border == index:
_UpperCamelCase : List[str] = value
else:
_UpperCamelCase : List[str] = max(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase : List[str] = self.get_next(lowerCAmelCase__ )
def lowercase_ (self , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
right -= 1 # Because of right is exclusive
_UpperCamelCase : Tuple = 0
while left <= right:
_UpperCamelCase : List[str] = self.get_prev(lowerCAmelCase__ )
if left <= current_left:
_UpperCamelCase : Optional[Any] = max(lowerCAmelCase__ , self.tree[right] )
_UpperCamelCase : Tuple = current_left
else:
_UpperCamelCase : Optional[int] = max(lowerCAmelCase__ , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 239
| 0
|
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
SCREAMING_SNAKE_CASE__ : List[Any] = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
SCREAMING_SNAKE_CASE__ : Any = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
SCREAMING_SNAKE_CASE__ : Tuple = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Dict:
return float((preds == labels).mean() )
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE="binary" ) -> int:
lowerCamelCase : str = simple_accuracy(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
lowerCamelCase : str = float(fa_score(y_true=_SCREAMING_SNAKE_CASE ,y_pred=_SCREAMING_SNAKE_CASE ,average=_SCREAMING_SNAKE_CASE ) )
return {
"accuracy": acc,
"f1": fa,
}
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int:
lowerCamelCase : Union[str, Any] = {}
for id_pred, label in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
lowerCamelCase : List[Any] = f'''{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'''
lowerCamelCase : Optional[int] = id_pred["prediction"]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
lowerCamelCase : int = [(pred, label)]
lowerCamelCase , lowerCamelCase : str = [], []
for question, preds_labels in question_map.items():
lowerCamelCase , lowerCamelCase : List[Any] = zip(*_SCREAMING_SNAKE_CASE )
lowerCamelCase : Dict = fa_score(y_true=_SCREAMING_SNAKE_CASE ,y_pred=_SCREAMING_SNAKE_CASE ,average="macro" )
fas.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Tuple = int(sum(pred == label for pred, label in preds_labels ) == len(_SCREAMING_SNAKE_CASE ) )
ems.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Optional[Any] = float(sum(_SCREAMING_SNAKE_CASE ) / len(_SCREAMING_SNAKE_CASE ) )
lowerCamelCase : List[Any] = sum(_SCREAMING_SNAKE_CASE ) / len(_SCREAMING_SNAKE_CASE )
lowerCamelCase : List[str] = float(fa_score(y_true=_SCREAMING_SNAKE_CASE ,y_pred=[id_pred["prediction"] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__ (datasets.Metric ):
'''simple docstring'''
def _lowercase ( self ) -> List[str]:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , )
def _lowercase ( self ) -> List[Any]:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"prediction_text": datasets.Value("string" ),
},
"references": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"answers": datasets.Sequence(datasets.Value("string" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64" ),
"paragraph": datasets.Value("int64" ),
"question": datasets.Value("int64" ),
},
"prediction": datasets.Value("int64" ),
},
"references": datasets.Value("int64" ),
}
else:
return {
"predictions": datasets.Value("int64" ),
"references": datasets.Value("int64" ),
}
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(UpperCamelCase__ , UpperCamelCase__ )}
elif self.config_name == "cb":
return acc_and_fa(UpperCamelCase__ , UpperCamelCase__ , fa_avg="macro" )
elif self.config_name == "record":
lowerCamelCase : Any = [
{
"qas": [
{"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
for ref in references
]
}
]
lowerCamelCase : Optional[int] = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
return evaluate_record(UpperCamelCase__ , UpperCamelCase__ )[0]
elif self.config_name == "multirc":
return evaluate_multirc(UpperCamelCase__ , UpperCamelCase__ )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(UpperCamelCase__ , UpperCamelCase__ )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
| 311
|
from statistics import mean, stdev
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 3 ) -> list:
lowerCamelCase : Optional[int] = min(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = max(_SCREAMING_SNAKE_CASE )
# normalize data
return [round((x - x_min) / (x_max - x_min) ,_SCREAMING_SNAKE_CASE ) for x in data]
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 3 ) -> list:
lowerCamelCase : Union[str, Any] = mean(_SCREAMING_SNAKE_CASE )
lowerCamelCase : int = stdev(_SCREAMING_SNAKE_CASE )
# standardize data
return [round((x - mu) / (sigma) ,_SCREAMING_SNAKE_CASE ) for x in data]
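# Usage sketch (illustrative values only): the first helper above performs min-max normalization,
# the second z-score standardization.
#   min-max on [2, 4, 6] -> [0.0, 0.5, 1.0]   since each value maps to (x - 2) / (6 - 2)
#   z-score on [2, 4, 6] -> [-1.0, 0.0, 1.0]  since mean = 4 and sample stdev = 2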
| 311
| 1
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
UpperCAmelCase__ : int =logging.get_logger(__name__)
class __A ( a ):
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , UpperCAmelCase_ , )
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
| 712
|
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ : Tuple =logging.get_logger(__name__)
UpperCAmelCase__ : str =['''model.decoder.embed_positions.weights''']
def _lowercase ( _UpperCAmelCase ) -> List[Any]:
if "emb" in name:
lowerCamelCase =name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
lowerCamelCase =name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
lowerCamelCase =name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
lowerCamelCase =name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
lowerCamelCase =name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
lowerCamelCase =name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
lowerCamelCase =name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
lowerCamelCase =name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
lowerCamelCase =name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
lowerCamelCase =name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
lowerCamelCase =name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
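# Illustrative only (hypothetical fairseq key): the substitutions above turn
# "transformer.layers.0.linear1.weight" into "model.decoder.layers.0.fc1.weight".
_example_key = "transformer.layers.0.linear1.weight".replace("transformer", "model.decoder").replace("linear1", "fc1")
assert _example_key == "model.decoder.layers.0.fc1.weight"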
def _lowercase ( _UpperCAmelCase , _UpperCAmelCase ) -> Tuple[Dict, Dict]:
lowerCamelCase =list(state_dict.keys() )
lowerCamelCase ={}
for key in keys:
lowerCamelCase =state_dict.pop(_UpperCAmelCase )
lowerCamelCase =rename_keys(_UpperCAmelCase )
if "in_proj_weight" in key:
# split fused qkv proj
lowerCamelCase =val[:hidden_size, :]
lowerCamelCase =val[hidden_size : 2 * hidden_size, :]
lowerCamelCase =val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
lowerCamelCase =val
else:
lowerCamelCase =val
return state_dict, enc_dec_proj_state_dict
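# Illustrative note: a fused "in_proj_weight" tensor of shape (3 * hidden_size, hidden_size) is
# split above into equal query / key / value projection weights of shape (hidden_size, hidden_size).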
def _lowercase ( _UpperCAmelCase ) -> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
lowerCamelCase =10_24
lowerCamelCase =24
lowerCamelCase =16
elif checkpoint == "medium":
lowerCamelCase =15_36
lowerCamelCase =48
lowerCamelCase =24
elif checkpoint == "large":
lowerCamelCase =20_48
lowerCamelCase =48
lowerCamelCase =32
else:
raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
lowerCamelCase =MusicgenDecoderConfig(
hidden_size=_UpperCAmelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=_UpperCAmelCase , num_attention_heads=_UpperCAmelCase , )
return config
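# Illustrative summary of the mapping above: "small" -> hidden_size 1024, 24 layers, 16 heads;
# "medium" -> 1536 / 48 / 24; "large" -> 2048 / 48 / 32; ffn_dim is always 4 * hidden_size.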
@torch.no_grad()
def _lowercase ( _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="cpu" ) -> Dict:
lowerCamelCase =MusicGen.get_pretrained(_UpperCAmelCase , device=_UpperCAmelCase )
lowerCamelCase =decoder_config_from_checkpoint(_UpperCAmelCase )
lowerCamelCase =fairseq_model.lm.state_dict()
lowerCamelCase , lowerCamelCase =rename_state_dict(
_UpperCAmelCase , hidden_size=decoder_config.hidden_size )
lowerCamelCase =TaEncoderModel.from_pretrained("""t5-base""" )
lowerCamelCase =EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
lowerCamelCase =MusicgenForCausalLM(_UpperCAmelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
lowerCamelCase , lowerCamelCase =decoder.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(_UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" )
if len(_UpperCAmelCase ) > 0:
raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
lowerCamelCase =MusicgenForConditionalGeneration(text_encoder=_UpperCAmelCase , audio_encoder=_UpperCAmelCase , decoder=_UpperCAmelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(_UpperCAmelCase )
# check we can do a forward pass
lowerCamelCase =torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
lowerCamelCase =input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
lowerCamelCase =model(input_ids=_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase ).logits
if logits.shape != (8, 1, 20_48):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
lowerCamelCase =AutoTokenizer.from_pretrained("""t5-base""" )
lowerCamelCase =AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
lowerCamelCase =MusicgenProcessor(feature_extractor=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
# set the appropriate bos/pad token ids
lowerCamelCase =20_48
lowerCamelCase =20_48
# set other default generation config params
lowerCamelCase =int(30 * audio_encoder.config.frame_rate )
lowerCamelCase =True
lowerCamelCase =3.0
if pytorch_dump_folder is not None:
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(_UpperCAmelCase )
processor.save_pretrained(_UpperCAmelCase )
if repo_id:
logger.info(F"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(_UpperCAmelCase )
processor.push_to_hub(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ : List[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
UpperCAmelCase__ : Optional[Any] =parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 269
| 0
|
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
UpperCamelCase = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class lowerCAmelCase_ ( _lowercase ):
def __init__( self , _lowerCAmelCase = 1_0_1 ):
_lowercase : List[str] = length
def __len__( self ):
return self.length
def __getitem__( self , _lowerCAmelCase ):
return i
class lowerCAmelCase_ :
def __call__( self , _lowerCAmelCase ):
return {"input_ids": torch.tensor(__UpperCamelCase ), "labels": torch.tensor(__UpperCamelCase )}
class lowerCAmelCase_ ( nn.Module ):
def __init__( self ):
super().__init__()
# Add some (unused) params otherwise DDP will complain.
_lowercase : str = nn.Linear(1_2_0 , 8_0 )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=None ):
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device ), input_ids
else:
return input_ids
class lowerCAmelCase_ ( _lowercase ):
@require_torch_neuroncore
def __a ( self ):
_lowercase : Union[str, Any] = F"""--nproc_per_node=2
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
""".split()
_lowercase : Any = self.get_auto_remove_tmp_dir()
_lowercase : Dict = F"""--output_dir {output_dir}""".split()
_lowercase : Any = ['torchrun'] + distributed_args + args
execute_subprocess_async(__UpperCamelCase , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class lowerCAmelCase_ ( _lowercase ):
@require_torch_multi_gpu
def __a ( self ):
_lowercase : Optional[int] = F"""--nproc_per_node={torch.cuda.device_count()}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
""".split()
_lowercase : Optional[int] = self.get_auto_remove_tmp_dir()
_lowercase : Any = F"""--output_dir {output_dir}""".split()
_lowercase : List[str] = ['torchrun'] + distributed_args + args
execute_subprocess_async(__UpperCamelCase , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
UpperCamelCase = HfArgumentParser((TrainingArguments,))
UpperCamelCase = parser.parse_args_into_dataclasses()[0]
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
f'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
UpperCamelCase = DummyDataset(dataset_length)
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Dict:
_lowercase : List[str] = list(range(len(a__ ) ) )
_lowercase : Tuple = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'Predictions and/or labels do not match expected results:\n - predictions: '
F"""{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}""" )
return {"success": success}
UpperCamelCase = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
UpperCamelCase = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
UpperCamelCase = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
UpperCamelCase = 2
UpperCamelCase = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
UpperCamelCase = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
UpperCamelCase = None
| 66
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class UpperCAmelCase_ ( _lowercase):
snake_case__ = '''gpt_neox'''
def __init__( self : Dict , __UpperCamelCase : int=5_0432 , __UpperCamelCase : List[Any]=6144 , __UpperCamelCase : str=44 , __UpperCamelCase : List[str]=64 , __UpperCamelCase : int=2_4576 , __UpperCamelCase : Tuple="gelu" , __UpperCamelCase : Dict=0.2_5 , __UpperCamelCase : int=1_0000 , __UpperCamelCase : Optional[Any]=0.0 , __UpperCamelCase : List[Any]=0.0 , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : Dict=2048 , __UpperCamelCase : Optional[Any]=0.0_2 , __UpperCamelCase : Optional[Any]=1E-5 , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Optional[int]=0 , __UpperCamelCase : Union[str, Any]=2 , __UpperCamelCase : List[Any]=False , __UpperCamelCase : List[str]=True , __UpperCamelCase : Optional[Any]=None , **__UpperCamelCase : Union[str, Any] , ) -> Union[str, Any]:
super().__init__(bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase )
_UpperCamelCase = vocab_size
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = rotary_pct
_UpperCamelCase = rotary_emb_base
_UpperCamelCase = attention_dropout
_UpperCamelCase = hidden_dropout
_UpperCamelCase = classifier_dropout
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = use_cache
_UpperCamelCase = tie_word_embeddings
_UpperCamelCase = use_parallel_residual
_UpperCamelCase = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
'''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' )
def _UpperCamelCase ( self : Optional[int] ) -> Tuple:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __UpperCamelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
F'''got {self.rope_scaling}''' )
_UpperCamelCase = self.rope_scaling.get('''type''' , __UpperCamelCase )
_UpperCamelCase = self.rope_scaling.get('''factor''' , __UpperCamelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(__UpperCamelCase , __UpperCamelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(F'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
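# Illustrative only: a `rope_scaling` value accepted by the validation above is
# {"type": "linear", "factor": 2.0} -- a two-field dict whose type is "linear" or "dynamic"
# and whose factor is a float strictly greater than 1.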
| 420
| 0
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_: int =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: str ={
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
SCREAMING_SNAKE_CASE_: Optional[int] =[
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : List[str] , snake_case_ : Union[str, Any] , snake_case_ : Any , snake_case_ : Tuple ) -> Optional[Any]:
'''simple docstring'''
for attribute in key.split("." ):
UpperCAmelCase_ = getattr(snake_case_ , snake_case_ )
if weight_type is not None:
UpperCAmelCase_ = getattr(snake_case_ , snake_case_ ).shape
else:
UpperCAmelCase_ = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
UpperCAmelCase_ = value
elif weight_type == "weight_g":
UpperCAmelCase_ = value
elif weight_type == "weight_v":
UpperCAmelCase_ = value
elif weight_type == "bias":
UpperCAmelCase_ = value
else:
UpperCAmelCase_ = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Dict ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = []
UpperCAmelCase_ = fairseq_model.state_dict()
UpperCAmelCase_ = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
UpperCAmelCase_ = None
for name, value in fairseq_dict.items():
UpperCAmelCase_ = False
if "conv_layers" in name:
load_conv_layer(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , hf_model.config.feat_extract_norm == "group" , )
UpperCAmelCase_ = True
elif name.split("." )[0] == "proj":
UpperCAmelCase_ = fairseq_model.proj
UpperCAmelCase_ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
UpperCAmelCase_ = True
if "*" in mapped_key:
UpperCAmelCase_ = name.split(snake_case_ )[0].split("." )[-2]
UpperCAmelCase_ = mapped_key.replace("*" , snake_case_ )
if "weight_g" in name:
UpperCAmelCase_ = "weight_g"
elif "weight_v" in name:
UpperCAmelCase_ = "weight_v"
elif "bias" in name:
UpperCAmelCase_ = "bias"
elif "weight" in name:
UpperCAmelCase_ = "weight"
else:
UpperCAmelCase_ = None
set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
continue
if not is_used:
unused_weights.append(snake_case_ )
logger.warning(f"""Unused weights: {unused_weights}""" )
return proj_weight
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : Optional[int] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = full_name.split("conv_layers." )[-1]
UpperCAmelCase_ = name.split("." )
UpperCAmelCase_ = int(items[0] )
UpperCAmelCase_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
UpperCAmelCase_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
UpperCAmelCase_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
UpperCAmelCase_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
UpperCAmelCase_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(snake_case_ )
def lowerCAmelCase_ ( snake_case_ : Any ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = emb.weight.shape
UpperCAmelCase_ = nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )
UpperCAmelCase_ = emb.weight.data
return lin_layer
def lowerCAmelCase_ ( snake_case_ : List[str] ) -> Tuple:
'''simple docstring'''
with open(snake_case_ , "r" , encoding="utf-8" ) as f:
UpperCAmelCase_ = f.readlines()
UpperCAmelCase_ = [line.split(" " )[0] for line in lines]
UpperCAmelCase_ = len(snake_case_ )
UpperCAmelCase_ = {
"<s>": 0,
"<pad>": 1,
"</s>": 2,
"<unk>": 3,
}
vocab_dict.update(dict(zip(snake_case_ , range(4 , num_words + 4 ) ) ) )
return vocab_dict
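# Illustrative only (hypothetical dict file): for a fairseq dictionary containing the lines
# "hello 42" and "world 17", the helper above returns
# {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}.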
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : List[str] , snake_case_ : List[str] , snake_case_ : int , snake_case_ : List[str] , snake_case_ : str , snake_case_ : Dict , ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = WavaVecaConfig.from_pretrained(snake_case_ )
UpperCAmelCase_ = SpeechaTextaConfig.from_pretrained(
snake_case_ , vocab_size=snake_case_ , decoder_layers=snake_case_ , do_stable_layer_norm=snake_case_ )
UpperCAmelCase_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=snake_case_ , return_attention_mask=snake_case_ , )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
UpperCAmelCase_ = model[0].eval()
# set weights for wav2vec2 encoder
UpperCAmelCase_ = WavaVecaModel(snake_case_ )
UpperCAmelCase_ = recursively_load_weights_wavaveca(model.encoder , snake_case_ )
UpperCAmelCase_ = SpeechaTextaForCausalLM(snake_case_ )
UpperCAmelCase_ , UpperCAmelCase_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=snake_case_ )
# set output linear layer
unexpected_keys.remove("embed_out" )
UpperCAmelCase_ = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
UpperCAmelCase_ = SpeechEncoderDecoderModel(encoder=snake_case_ , decoder=snake_case_ )
UpperCAmelCase_ = False
# add projection layer
UpperCAmelCase_ = nn.Parameter(projection_layer.weight )
UpperCAmelCase_ = nn.Parameter(projection_layer.bias )
UpperCAmelCase_ = create_vocab_dict(snake_case_ )
with open(os.path.join(snake_case_ , "vocab.json" ) , "w" ) as fp:
json.dump(snake_case_ , snake_case_ )
UpperCAmelCase_ = SpeechaTextaTokenizer(os.path.join(snake_case_ , "vocab.json" ) )
tokenizer.save_pretrained(snake_case_ )
UpperCAmelCase_ = hf_wavavec.config.to_dict()
UpperCAmelCase_ = tokenizer.pad_token_id
UpperCAmelCase_ = tokenizer.bos_token_id
UpperCAmelCase_ = tokenizer.eos_token_id
UpperCAmelCase_ = "speech_to_text_2"
UpperCAmelCase_ = "wav2vec2"
UpperCAmelCase_ = SpeechEncoderDecoderConfig.from_dict(snake_case_ )
hf_wavavec.save_pretrained(snake_case_ )
feature_extractor.save_pretrained(snake_case_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: List[Any] =argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_02_24, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
SCREAMING_SNAKE_CASE_: int =parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 415
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __A ( UpperCamelCase__ ):
a__ : Tuple = ["""image_processor""", """tokenizer"""]
a__ : int = """FlavaImageProcessor"""
a__ : Dict = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__(self : int , __a : Any=None , __a : List[str]=None , **__a : Any ):
UpperCAmelCase_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __a , )
UpperCAmelCase_ = kwargs.pop("feature_extractor" )
UpperCAmelCase_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__a , __a )
UpperCAmelCase_ = self.image_processor
def __call__(self : Tuple , __a : Optional[ImageInput] = None , __a : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , __a : bool = True , __a : Union[bool, str, PaddingStrategy] = False , __a : Union[bool, str, TruncationStrategy] = False , __a : Optional[int] = None , __a : int = 0 , __a : Optional[int] = None , __a : Optional[bool] = None , __a : Optional[bool] = None , __a : Optional[bool] = None , __a : Optional[bool] = None , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = True , __a : Optional[Union[str, TensorType]] = None , **__a : Optional[Any] , ):
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
UpperCAmelCase_ = self.tokenizer(
text=__a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , stride=__a , pad_to_multiple_of=__a , return_token_type_ids=__a , return_attention_mask=__a , return_overflowing_tokens=__a , return_special_tokens_mask=__a , return_offsets_mapping=__a , return_length=__a , verbose=__a , return_tensors=__a , **__a , )
if images is not None:
UpperCAmelCase_ = self.image_processor(
__a , return_image_mask=__a , return_codebook_pixels=__a , return_tensors=__a , **__a , )
if text is not None and images is not None:
encoding.update(__a )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__a ) , tensor_type=__a )
def _lowercase (self : Optional[int] , *__a : str , **__a : Any ):
return self.tokenizer.batch_decode(*__a , **__a )
def _lowercase (self : Dict , *__a : int , **__a : Optional[Any] ):
return self.tokenizer.decode(*__a , **__a )
@property
def _lowercase (self : int ):
UpperCAmelCase_ = self.tokenizer.model_input_names
UpperCAmelCase_ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _lowercase (self : Any ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , )
return self.image_processor_class
@property
def _lowercase (self : Optional[int] ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , )
return self.image_processor
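# Usage sketch (names assumed, illustrative only): this processor pairs a Flava image processor
# with a BERT tokenizer; calling it with `images=...` and/or `text=...` and `return_tensors="pt"`
# returns a single BatchEncoding holding the combined image and text features.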
| 415
| 1
|
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _a :
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : List[str]=3 , SCREAMING_SNAKE_CASE__ : int=4 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : List[str]=99 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=36 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE__ : int=4 , SCREAMING_SNAKE_CASE__ : Optional[int]=37 , SCREAMING_SNAKE_CASE__ : Tuple="gelu" , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=0.1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=5_12 , SCREAMING_SNAKE_CASE__ : str=16 , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.02 , SCREAMING_SNAKE_CASE__ : str=6 , SCREAMING_SNAKE_CASE__ : Dict=6 , SCREAMING_SNAKE_CASE__ : Any=3 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : List[str]=10_00 , ):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = image_size
lowerCamelCase__ = patch_size
lowerCamelCase__ = text_seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = use_token_type_ids
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = type_sequence_label_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = coordinate_size
lowerCamelCase__ = shape_size
lowerCamelCase__ = num_labels
lowerCamelCase__ = num_choices
lowerCamelCase__ = scope
lowerCamelCase__ = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
lowerCamelCase__ = text_seq_length
lowerCamelCase__ = (image_size // patch_size) ** 2 + 1
lowerCamelCase__ = self.text_seq_length + self.image_seq_length
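# Illustrative arithmetic for the defaults above: image_size=4 and patch_size=2 give an image
# sequence of (4 // 2) ** 2 + 1 = 5 tokens, so seq_length = text_seq_length (7) + 5 = 12.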
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
lowerCamelCase__ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCamelCase__ = bbox[i, j, 3]
lowerCamelCase__ = bbox[i, j, 1]
lowerCamelCase__ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCamelCase__ = bbox[i, j, 2]
lowerCamelCase__ = bbox[i, j, 0]
lowerCamelCase__ = t
lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.text_seq_length] )
lowerCamelCase__ = None
if self.use_token_type_ids:
lowerCamelCase__ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
lowerCamelCase__ = None
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
lowerCamelCase__ = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] ):
lowerCamelCase__ = LayoutLMvaModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
# text + image
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(
SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
lowerCamelCase__ = model(pixel_values=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = LayoutLMvaForSequenceClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCamelCase__ = model(
SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = LayoutLMvaForTokenClassification(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCamelCase__ = model(
SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ):
lowerCamelCase__ = LayoutLMvaForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCamelCase__ = model(
SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = self.prepare_config_and_inputs()
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) = config_and_inputs
lowerCamelCase__ = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class _a ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Optional[int] = False
a_ : List[Any] = False
a_ : Tuple = False
a_ : List[str] = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
a_ : List[Any] = (
{'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = LayoutLMvaModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ):
lowerCamelCase__ = copy.deepcopy(SCREAMING_SNAKE_CASE__ )
if model_class in get_values(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ )
elif model_class in get_values(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ )
elif model_class in [
*get_values(SCREAMING_SNAKE_CASE__ ),
]:
lowerCamelCase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ )
elif model_class in [
*get_values(SCREAMING_SNAKE_CASE__ ),
]:
lowerCamelCase__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ , )
return inputs_dict
def _UpperCamelCase ( self : str ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase__ = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self : Dict ):
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = LayoutLMvaModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def snake_case ( )-> str:
'''simple docstring'''
lowerCamelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class _a ( unittest.TestCase ):
@cached_property
def _UpperCamelCase ( self : Dict ):
return LayoutLMvaImageProcessor(apply_ocr=SCREAMING_SNAKE_CASE__ ) if is_vision_available() else None
@slow
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ).to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.default_image_processor
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values.to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.tensor([[1, 2]] )
lowerCamelCase__ = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
lowerCamelCase__ = model(
input_ids=input_ids.to(SCREAMING_SNAKE_CASE__ ) , bbox=bbox.to(SCREAMING_SNAKE_CASE__ ) , pixel_values=pixel_values.to(SCREAMING_SNAKE_CASE__ ) , )
# verify the logits
lowerCamelCase__ = torch.Size((1, 1_99, 7_68) )
self.assertEqual(outputs.last_hidden_state.shape , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.tensor(
[[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
| 510
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_snake_case = False
class _a ( unittest.TestCase ):
def _UpperCamelCase ( self : Dict ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _UpperCamelCase ( self : Tuple ):
return 12
@property
def _UpperCamelCase ( self : str ):
return 12
@property
def _UpperCamelCase ( self : Optional[int] ):
return 32
@property
def _UpperCamelCase ( self : Optional[Any] ):
torch.manual_seed(0 )
lowerCamelCase__ = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _UpperCamelCase ( self : Union[str, Any] ):
torch.manual_seed(0 )
lowerCamelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(SCREAMING_SNAKE_CASE__ )
@property
def _UpperCamelCase ( self : Any ):
torch.manual_seed(0 )
lowerCamelCase__ = 12
lowerCamelCase__ = 12
lowerCamelCase__ = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
lowerCamelCase__ = TransformeraDModel(**SCREAMING_SNAKE_CASE__ )
return model
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = 'cpu'
lowerCamelCase__ = self.dummy_vqvae
lowerCamelCase__ = self.dummy_text_encoder
lowerCamelCase__ = self.dummy_tokenizer
lowerCamelCase__ = self.dummy_transformer
lowerCamelCase__ = VQDiffusionScheduler(self.num_embed )
lowerCamelCase__ = LearnedClassifierFreeSamplingEmbeddings(learnable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = VQDiffusionPipeline(
vqvae=SCREAMING_SNAKE_CASE__ , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , transformer=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , learned_classifier_free_sampling_embeddings=SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 'teddy bear playing in the pool'
lowerCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 )
lowerCamelCase__ = pipe([prompt] , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type='np' )
lowerCamelCase__ = output.images
lowerCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 )
lowerCamelCase__ = pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , return_dict=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 )[0]
lowerCamelCase__ = image[0, -3:, -3:, -1]
lowerCamelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
lowerCamelCase__ = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = 'cpu'
lowerCamelCase__ = self.dummy_vqvae
lowerCamelCase__ = self.dummy_text_encoder
lowerCamelCase__ = self.dummy_tokenizer
lowerCamelCase__ = self.dummy_transformer
lowerCamelCase__ = VQDiffusionScheduler(self.num_embed )
lowerCamelCase__ = LearnedClassifierFreeSamplingEmbeddings(
learnable=SCREAMING_SNAKE_CASE__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
lowerCamelCase__ = VQDiffusionPipeline(
vqvae=SCREAMING_SNAKE_CASE__ , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , transformer=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , learned_classifier_free_sampling_embeddings=SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 'teddy bear playing in the pool'
lowerCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 )
lowerCamelCase__ = pipe([prompt] , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type='np' )
lowerCamelCase__ = output.images
lowerCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 )
lowerCamelCase__ = pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , return_dict=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 )[0]
lowerCamelCase__ = image[0, -3:, -3:, -1]
lowerCamelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
lowerCamelCase__ = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
def _UpperCamelCase ( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
lowerCamelCase__ = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
lowerCamelCase__ = pipeline.to(SCREAMING_SNAKE_CASE__ )
pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
lowerCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 )
lowerCamelCase__ = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , )
lowerCamelCase__ = output.images[0]
assert image.shape == (2_56, 2_56, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 510
| 1
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
class a ( __snake_case ):
SCREAMING_SNAKE_CASE : str = """upernet"""
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Optional[Any]=512 , __SCREAMING_SNAKE_CASE : List[str]=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=[1, 2, 3, 6] , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : List[str]=0.4 , __SCREAMING_SNAKE_CASE : int=384 , __SCREAMING_SNAKE_CASE : str=256 , __SCREAMING_SNAKE_CASE : Optional[Any]=1 , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : List[Any]=255 , **__SCREAMING_SNAKE_CASE : str , ) -> Any:
super().__init__(**__SCREAMING_SNAKE_CASE )
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
lowerCamelCase_ = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowerCamelCase_ = backbone_config.get('model_type' )
lowerCamelCase_ = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase_ = config_class.from_dict(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = backbone_config
lowerCamelCase_ = hidden_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = pool_scales
lowerCamelCase_ = use_auxiliary_head
lowerCamelCase_ = auxiliary_loss_weight
lowerCamelCase_ = auxiliary_in_channels
lowerCamelCase_ = auxiliary_channels
lowerCamelCase_ = auxiliary_num_convs
lowerCamelCase_ = auxiliary_concat_input
lowerCamelCase_ = loss_ignore_index
def UpperCamelCase ( self : List[Any] ) -> List[str]:
lowerCamelCase_ = copy.deepcopy(self.__dict__ )
lowerCamelCase_ = self.backbone_config.to_dict()
lowerCamelCase_ = self.__class__.model_type
return output
| 137
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class a ( unittest.TestCase ):
def __init__( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int=7 , __SCREAMING_SNAKE_CASE : Tuple=3 , __SCREAMING_SNAKE_CASE : Tuple=18 , __SCREAMING_SNAKE_CASE : str=30 , __SCREAMING_SNAKE_CASE : Tuple=400 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : str=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Tuple=[0.5, 0.5, 0.5] , ) -> Tuple:
lowerCamelCase_ = size if size is not None else {'shortest_edge': 18}
lowerCamelCase_ = crop_size if crop_size is not None else {'height': 18, 'width': 18}
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = image_size
lowerCamelCase_ = min_resolution
lowerCamelCase_ = max_resolution
lowerCamelCase_ = do_resize
lowerCamelCase_ = size
lowerCamelCase_ = do_center_crop
lowerCamelCase_ = crop_size
lowerCamelCase_ = do_normalize
lowerCamelCase_ = image_mean
lowerCamelCase_ = image_std
def UpperCamelCase ( self : int ) -> Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class a ( __snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = LevitImageProcessor if is_vision_available() else None
def UpperCamelCase ( self : Optional[Any] ) -> Tuple:
lowerCamelCase_ = LevitImageProcessingTester(self )
@property
def UpperCamelCase ( self : Any ) -> List[str]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self : List[Any] ) -> Tuple:
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'image_std' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'do_center_crop' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'size' ) )
def UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def UpperCamelCase ( self : Optional[Any] ) -> str:
pass
def UpperCamelCase ( self : int ) -> Optional[Any]:
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCamelCase_ = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCamelCase ( self : List[str] ) -> List[Any]:
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCamelCase_ = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCamelCase_ = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
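# --- Hedged usage sketch (added; not part of the original test file) ---
# It exercises LevitImageProcessor directly with the same size/crop_size dicts
# the tester above uses; the resulting shape assumes the processor's default
# resize + center-crop behaviour.
def _levit_processor_demo():
    processor = LevitImageProcessor(size={'shortest_edge': 18} , crop_size={'height': 18, 'width': 18} )
    image = Image.new('RGB' , (30, 40) )
    # resize (driven by shortest_edge) followed by an 18x18 center crop
    pixel_values = processor(image , return_tensors='pt' ).pixel_values
    return pixel_values.shape  # expected: torch.Size([1, 3, 18, 18])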
| 137
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCamelCase (A__ ,unittest.TestCase ):
lowerCamelCase__ : Dict = KandinskyVaaControlnetImgaImgPipeline
lowerCamelCase__ : str = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : int = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : List[Any] = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCamelCase__ : int = False
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
return 3_2
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return 3_2
@property
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
return 1_0_0
@property
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
SCREAMING_SNAKE_CASE__ = UNetaDConditionModel(**__UpperCAmelCase )
return model
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = VQModel(**self.dummy_movq_kwargs )
return model
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
SCREAMING_SNAKE_CASE__ = self.dummy_unet
SCREAMING_SNAKE_CASE__ = self.dummy_movq
SCREAMING_SNAKE_CASE__ = {
"""num_train_timesteps""": 1_0_0_0,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00_085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
SCREAMING_SNAKE_CASE__ = DDIMScheduler(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def SCREAMING_SNAKE_CASE ( self : str , __UpperCAmelCase : Any , __UpperCAmelCase : Any=0 ) -> Tuple:
SCREAMING_SNAKE_CASE__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__UpperCAmelCase )
# create init_image
SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ = Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((2_5_6, 2_5_6) )
# create hint
SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
if str(__UpperCAmelCase ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE__ = torch.manual_seed(__UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 6_4,
"""width""": 6_4,
"""num_inference_steps""": 1_0,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
SCREAMING_SNAKE_CASE__ = """cpu"""
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = self.pipeline_class(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = pipe(**self.get_dummy_inputs(__UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = pipe(
**self.get_dummy_inputs(__UpperCAmelCase ) , return_dict=__UpperCAmelCase , )[0]
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
SCREAMING_SNAKE_CASE__ = np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class lowerCamelCase (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" )
SCREAMING_SNAKE_CASE__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
SCREAMING_SNAKE_CASE__ = init_image.resize((5_1_2, 5_1_2) )
SCREAMING_SNAKE_CASE__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
SCREAMING_SNAKE_CASE__ = torch.from_numpy(np.array(__UpperCAmelCase ) ).float() / 255.0
SCREAMING_SNAKE_CASE__ = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
SCREAMING_SNAKE_CASE__ = """A robot, 4k photo"""
SCREAMING_SNAKE_CASE__ = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ = pipeline.to(__UpperCAmelCase )
pipeline.set_progress_bar_config(disable=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = pipe_prior(
__UpperCAmelCase , image=__UpperCAmelCase , strength=0.85 , generator=__UpperCAmelCase , negative_prompt="""""" , ).to_tuple()
SCREAMING_SNAKE_CASE__ = pipeline(
image=__UpperCAmelCase , image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , hint=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=1_0_0 , height=5_1_2 , width=5_1_2 , strength=0.5 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
| 196
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
A_ : Optional[Any] = logging.getLogger(__name__)
@dataclass
class lowerCamelCase (A__ ):
lowerCamelCase__ : Optional[float] = field(
default=0.0 ,metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
lowerCamelCase__ : bool = field(default=A__ ,metadata={'help': 'Whether to SortishSamler or not.'} )
lowerCamelCase__ : bool = field(
default=A__ ,metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
lowerCamelCase__ : bool = field(default=A__ ,metadata={'help': 'whether to use adafactor'} )
lowerCamelCase__ : Optional[float] = field(
default=A__ ,metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
lowerCamelCase__ : Optional[float] = field(
default=A__ ,metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
lowerCamelCase__ : Optional[float] = field(default=A__ ,metadata={'help': 'Dropout probability. Goes into model.config.'} )
lowerCamelCase__ : Optional[float] = field(
default=A__ ,metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
lowerCamelCase__ : Optional[str] = field(
default='linear' ,metadata={'help': f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} ,)
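# Added note (hedged): in the legacy seq2seq example this dataclass is typically
# combined with the model and data argument dataclasses and parsed with
# transformers.HfArgumentParser, roughly:
#     parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
#     model_args, data_args, training_args = parser.parse_args_into_dataclasses()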
| 196
| 1
|
"""simple docstring"""
from __future__ import annotations
def encode( plain: str ) -> list[int]:
    '''Convert each lowercase letter to its 1-based position in the alphabet.'''
    return [ord(elem ) - 96 for elem in plain]
def decode( encoded: list[int] ) -> str:
    '''Convert 1-based alphabet positions back to lowercase letters.'''
    return "".join(chr(elem + 96 ) for elem in encoded )
def main( ) -> None:
    '''Encode user input and show that decoding restores it.'''
    snake_case_ = encode(input("""-> """ ).strip().lower() )
    print("""Encoded: """, snake_case_ )
    print("""Decoded:""", decode(snake_case_ ) )
if __name__ == "__main__":
    main()
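# Round-trip sketch (added for illustration; not in the original script):
# 'a' maps to 1 and 'z' to 26, so encoding then decoding restores the text.
def _roundtrip_demo() -> None:
    codes = encode("""hello""" )
    assert codes == [8, 5, 12, 12, 15]
    assert decode(codes ) == """hello"""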
| 310
|
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset( ) -> Dataset:
'''simple docstring'''
snake_case_ :Tuple = {
"""repo_name""": ["""test_repo1""", """test_repo2""", """test_repo3"""],
"""path""": ["""test_1.py""", """test_2.py""", """unit_test.py"""],
"""content""": ["""a """ * 20, """a """ * 30, """b """ * 7],
}
    dataset = Dataset.from_dict(snake_case_ )
    return dataset
class lowerCamelCase ( TestCase ):
    '''Tests for the MinHash-based deduplication helpers.'''
    def test_make_duplicate_clusters( self ) -> None:
        snake_case_ = get_dataset()
        duplicate_clusters = make_duplicate_clusters(snake_case_ , 0.8_5 )
        self.assertEqual(len(duplicate_clusters[0] ) , 2 )
    def test_deduplicate_dataset( self ) -> None:
        snake_case_ = get_dataset()
        ds_dedup, duplicate_clusters = deduplicate_dataset(snake_case_ )
        self.assertEqual(len(ds_dedup ) , 2 )
        print(duplicate_clusters )
        self.assertEqual(duplicate_clusters[0][0]["""copies"""] , 2 )
        self.assertEqual(duplicate_clusters[0][0]["""is_extreme"""] , True )
| 310
| 1
|
"""simple docstring"""
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def main( ):
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/transformers' )
    open_issues = repo.get_issues(state='open' )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda snake_case__ : snake_case__.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='closed' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 91
|
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : Dict = ['image_processor', 'tokenizer']
_snake_case : int = 'BlipImageProcessor'
_snake_case : Tuple = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self : Union[str, Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] ) -> int:
'''simple docstring'''
_UpperCamelCase = False
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = self.image_processor
def __call__( self : int , lowerCAmelCase__ : ImageInput = None , lowerCAmelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Union[bool, str, PaddingStrategy] = False , lowerCAmelCase__ : Union[bool, str, TruncationStrategy] = None , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> BatchEncoding:
'''simple docstring'''
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None:
_UpperCamelCase = self.tokenizer
_UpperCamelCase = self.tokenizer(
text=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , stride=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_overflowing_tokens=lowerCAmelCase__ , return_special_tokens_mask=lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_length=lowerCAmelCase__ , verbose=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ , )
return text_encoding
# add pixel_values
_UpperCamelCase = self.image_processor(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
if text is not None:
_UpperCamelCase = self.tokenizer(
text=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , stride=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_overflowing_tokens=lowerCAmelCase__ , return_special_tokens_mask=lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_length=lowerCAmelCase__ , verbose=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ , )
else:
_UpperCamelCase = None
if text_encoding is not None:
encoding_image_processor.update(lowerCAmelCase__ )
return encoding_image_processor
def snake_case__ ( self : Any , *lowerCAmelCase__ : Union[str, Any] , **lowerCAmelCase__ : Dict ) -> List[str]:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case__ ( self : Tuple , *lowerCAmelCase__ : int , **lowerCAmelCase__ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def snake_case__ ( self : Any ) -> str:
'''simple docstring'''
_UpperCamelCase = self.tokenizer.model_input_names
_UpperCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 98
| 0
|
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 709
|
import collections
import importlib.util
import os
import re
from pathlib import Path
lowerCAmelCase__ = """src/transformers"""
# Matches is_xxx_available()
lowerCAmelCase__ = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
lowerCAmelCase__ = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCAmelCase__ = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
lowerCAmelCase__ = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
lowerCAmelCase__ = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCAmelCase__ = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCAmelCase__ = re.compile(R"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCAmelCase__ = re.compile(R"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
lowerCAmelCase__ = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
lowerCAmelCase__ = re.compile(R"""^\s*try:""")
# Catches a line with else:
lowerCAmelCase__ = re.compile(R"""^\s*else:""")
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Any ) -> int:
'''simple docstring'''
if _re_test_backend.search(SCREAMING_SNAKE_CASE_ ) is None:
return None
A__ = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE_ )]
backends.sort()
return "_and_".join(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE_ , "r" , encoding="utf-8" , newline="\n" ) as f:
A__ = f.readlines()
A__ = 0
while line_index < len(SCREAMING_SNAKE_CASE_ ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(SCREAMING_SNAKE_CASE_ ):
return None
# First grab the objects without a specific backend in _import_structure
A__ = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
A__ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE_ ):
A__ = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE_ ).groups()[0]
A__ = re.findall("\[([^\]]+)\]" , SCREAMING_SNAKE_CASE_ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
A__ = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE_ )
if single_line_import_search is not None:
A__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(SCREAMING_SNAKE_CASE_ ) > 0]
objects.extend(SCREAMING_SNAKE_CASE_ )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
A__ = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
A__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
A__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
A__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
A__ = lines[line_index]
if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE_ ) is not None:
objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE_ ).groups()[0] )
elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE_ ) is not None:
A__ = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE_ ).groups()[0].split(", " )
A__ = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE_ ) > 0]
objects.extend(SCREAMING_SNAKE_CASE_ )
elif _re_between_brackets.search(SCREAMING_SNAKE_CASE_ ) is not None:
A__ = _re_between_brackets.search(SCREAMING_SNAKE_CASE_ ).groups()[0].split(", " )
A__ = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE_ ) > 0]
objects.extend(SCREAMING_SNAKE_CASE_ )
elif _re_quote_object.search(SCREAMING_SNAKE_CASE_ ) is not None:
objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE_ ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 1_2 + "\"" ):
objects.append(line[1_3:-3] )
line_index += 1
A__ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
A__ = []
while (
line_index < len(SCREAMING_SNAKE_CASE_ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
A__ = lines[line_index]
A__ = _re_import.search(SCREAMING_SNAKE_CASE_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
A__ = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(SCREAMING_SNAKE_CASE_ ):
# If the line is an if is_backend_available, we grab all objects associated.
A__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
A__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
A__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
A__ = lines[line_index]
A__ = _re_import.search(SCREAMING_SNAKE_CASE_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 1_2 ):
objects.append(line[1_2:-2] )
line_index += 1
A__ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Dict , SCREAMING_SNAKE_CASE_: List[Any] ) -> Optional[int]:
'''simple docstring'''
def find_duplicates(SCREAMING_SNAKE_CASE_: str ):
return [k for k, v in collections.Counter(SCREAMING_SNAKE_CASE_ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
A__ = []
for key in import_dict_objects.keys():
A__ = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' )
A__ = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
A__ = "base imports" if key == "none" else F'{key} backend'
errors.append(F'Differences for {name}:' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F' {a} in TYPE_HINT but not in _import_structure.' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F' {a} in _import_structure but not in TYPE_HINT.' )
return errors
def lowerCAmelCase__ ( ) -> Dict:
'''simple docstring'''
A__ = []
for root, _, files in os.walk(SCREAMING_SNAKE_CASE_ ):
if "__init__.py" in files:
A__ = os.path.join(SCREAMING_SNAKE_CASE_ , "__init__.py" )
A__ = parse_init(SCREAMING_SNAKE_CASE_ )
if objects is not None:
A__ = analyze_results(*SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
A__ = F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
failures.append("\n".join(SCREAMING_SNAKE_CASE_ ) )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
raise ValueError("\n\n".join(SCREAMING_SNAKE_CASE_ ) )
def lowerCAmelCase__ ( ) -> Optional[Any]:
'''simple docstring'''
A__ = []
for path, directories, files in os.walk(SCREAMING_SNAKE_CASE_ ):
for folder in directories:
# Ignore private modules
if folder.startswith("_" ):
directories.remove(SCREAMING_SNAKE_CASE_ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(SCREAMING_SNAKE_CASE_ ) / folder).glob("*.py" ) ) ) == 0:
continue
A__ = str((Path(SCREAMING_SNAKE_CASE_ ) / folder).relative_to(SCREAMING_SNAKE_CASE_ ) )
A__ = short_path.replace(os.path.sep , "." )
submodules.append(SCREAMING_SNAKE_CASE_ )
for fname in files:
if fname == "__init__.py":
continue
A__ = str((Path(SCREAMING_SNAKE_CASE_ ) / fname).relative_to(SCREAMING_SNAKE_CASE_ ) )
A__ = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
if len(submodule.split("." ) ) == 1:
submodules.append(SCREAMING_SNAKE_CASE_ )
return submodules
lowerCAmelCase__ = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
]
def lowerCAmelCase__ ( ) -> Optional[int]:
'''simple docstring'''
A__ = importlib.util.spec_from_file_location(
"transformers" , os.path.join(SCREAMING_SNAKE_CASE_ , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
A__ = spec.loader.load_module()
A__ = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(SCREAMING_SNAKE_CASE_ ) > 0:
A__ = "\n".join(F'- {module}' for module in module_not_registered )
raise ValueError(
"The following submodules are not properly registered in the main init of Transformers:\n"
F'{list_of_modules}\n'
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 626
| 0
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
SCREAMING_SNAKE_CASE_ = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE__ : Tuple = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
SCREAMING_SNAKE_CASE__ : Any = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
SCREAMING_SNAKE_CASE__ : Any = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_snake_case : List[Any] = pipeline(
task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' )
_snake_case : List[str] = text_classifier('This is great !' )
self.assertEqual(nested_simplify(snake_case ) , [{'label': 'LABEL_0', 'score': 0.504}] )
_snake_case : Any = text_classifier('This is great !' , top_k=2 )
self.assertEqual(
nested_simplify(snake_case ) , [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}] )
_snake_case : List[str] = text_classifier(['This is great !', 'This is bad'] , top_k=2 )
self.assertEqual(
nested_simplify(snake_case ) , [
[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
] , )
_snake_case : Optional[int] = text_classifier('This is great !' , top_k=1 )
self.assertEqual(nested_simplify(snake_case ) , [{'label': 'LABEL_0', 'score': 0.504}] )
# Legacy behavior
_snake_case : str = text_classifier('This is great !' , return_all_scores=snake_case )
self.assertEqual(nested_simplify(snake_case ) , [{'label': 'LABEL_0', 'score': 0.504}] )
_snake_case : Dict = text_classifier('This is great !' , return_all_scores=snake_case )
self.assertEqual(
nested_simplify(snake_case ) , [[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}]] )
_snake_case : Optional[Any] = text_classifier(['This is great !', 'Something else'] , return_all_scores=snake_case )
self.assertEqual(
nested_simplify(snake_case ) , [
[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
] , )
_snake_case : Any = text_classifier(['This is great !', 'Something else'] , return_all_scores=snake_case )
self.assertEqual(
nested_simplify(snake_case ) , [
{'label': 'LABEL_0', 'score': 0.504},
{'label': 'LABEL_0', 'score': 0.504},
] , )
@require_torch
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
import torch
_snake_case : Optional[Any] = pipeline(
task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' , device=torch.device('cpu' ) , )
_snake_case : Optional[int] = text_classifier('This is great !' )
self.assertEqual(nested_simplify(snake_case ) , [{'label': 'LABEL_0', 'score': 0.504}] )
@require_tf
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
_snake_case : List[Any] = pipeline(
task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='tf' )
_snake_case : str = text_classifier('This is great !' )
self.assertEqual(nested_simplify(snake_case ) , [{'label': 'LABEL_0', 'score': 0.504}] )
@slow
@require_torch
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
_snake_case : int = pipeline('text-classification' )
_snake_case : Optional[Any] = text_classifier('This is great !' )
self.assertEqual(nested_simplify(snake_case ) , [{'label': 'POSITIVE', 'score': 1.0}] )
_snake_case : Union[str, Any] = text_classifier('This is bad !' )
self.assertEqual(nested_simplify(snake_case ) , [{'label': 'NEGATIVE', 'score': 1.0}] )
_snake_case : Dict = text_classifier('Birds are a type of animal' )
self.assertEqual(nested_simplify(snake_case ) , [{'label': 'POSITIVE', 'score': 0.988}] )
@slow
@require_tf
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
_snake_case : Optional[int] = pipeline('text-classification' , framework='tf' )
_snake_case : Any = text_classifier('This is great !' )
self.assertEqual(nested_simplify(snake_case ) , [{'label': 'POSITIVE', 'score': 1.0}] )
_snake_case : Optional[int] = text_classifier('This is bad !' )
self.assertEqual(nested_simplify(snake_case ) , [{'label': 'NEGATIVE', 'score': 1.0}] )
_snake_case : Dict = text_classifier('Birds are a type of animal' )
self.assertEqual(nested_simplify(snake_case ) , [{'label': 'POSITIVE', 'score': 0.988}] )
def __UpperCAmelCase ( self : Union[str, Any] , snake_case : str , snake_case : Optional[int] , snake_case : Union[str, Any] ):
"""simple docstring"""
_snake_case : Optional[int] = TextClassificationPipeline(model=snake_case , tokenizer=snake_case )
return text_classifier, ["HuggingFace is in", "This is another test"]
def __UpperCAmelCase ( self : Tuple , snake_case : Any , snake_case : Dict ):
"""simple docstring"""
_snake_case : str = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
_snake_case : Optional[int] = 'HuggingFace is in'
_snake_case : Any = text_classifier(snake_case )
self.assertEqual(nested_simplify(snake_case ) , [{'label': ANY(snake_case ), 'score': ANY(snake_case )}] )
self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
_snake_case : Tuple = ['HuggingFace is in ', 'Paris is in France']
_snake_case : Union[str, Any] = text_classifier(snake_case )
self.assertEqual(
nested_simplify(snake_case ) , [{'label': ANY(snake_case ), 'score': ANY(snake_case )}, {'label': ANY(snake_case ), 'score': ANY(snake_case )}] , )
self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
self.assertTrue(outputs[1]['label'] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
_snake_case : Optional[int] = text_classifier(snake_case , top_k=snake_case )
_snake_case : List[Any] = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(snake_case ) , [[{'label': ANY(snake_case ), 'score': ANY(snake_case )}] * N, [{'label': ANY(snake_case ), 'score': ANY(snake_case )}] * N] , )
_snake_case : Dict = {'text': 'HuggingFace is in ', 'text_pair': 'Paris is in France'}
_snake_case : Optional[Any] = text_classifier(snake_case )
self.assertEqual(
nested_simplify(snake_case ) , {'label': ANY(snake_case ), 'score': ANY(snake_case )} , )
self.assertTrue(outputs['label'] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
_snake_case : Optional[Any] = [['HuggingFace is in ', 'Paris is in France']]
with self.assertRaises(snake_case ):
text_classifier(snake_case )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
_snake_case : List[str] = text_classifier([[['HuggingFace is in ', 'Paris is in France']]] )
self.assertEqual(
nested_simplify(snake_case ) , [{'label': ANY(snake_case ), 'score': ANY(snake_case )}] , )
self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
| 517
|
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
SCREAMING_SNAKE_CASE_ = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
SCREAMING_SNAKE_CASE_ = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
SCREAMING_SNAKE_CASE_ = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
] , )
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('float' ) ),
"references": datasets.Sequence(datasets.Value('float' ) ),
}
else:
return {
"predictions": datasets.Value('float' ),
"references": datasets.Value('float' ),
}
def __UpperCAmelCase ( self : Optional[int] , snake_case : str , snake_case : int , snake_case : Dict=None , snake_case : List[str]="uniform_average" , snake_case : Any=True ):
"""simple docstring"""
_snake_case : str = mean_squared_error(
snake_case , snake_case , sample_weight=snake_case , multioutput=snake_case , squared=snake_case )
return {"mse": mse}
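# Added note (hedged): the metric above simply forwards to
# sklearn.metrics.mean_squared_error, so the docstring example can be checked
# directly: mean_squared_error([3, -0.5, 2, 7], [2.5, 0.0, 2, 8]) == 0.375.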
| 517
| 1
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _A ( __lowerCamelCase ):
'''simple docstring'''
_snake_case : Optional[Any] = ["""image_processor""", """tokenizer"""]
_snake_case : List[Any] = """AutoImageProcessor"""
_snake_case : Tuple = """AutoTokenizer"""
def __init__( self : str , lowerCamelCase : Tuple , lowerCamelCase : Tuple ):
'''simple docstring'''
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )
__lowercase = self.image_processor
def __call__( self : List[Any] , lowerCamelCase : Tuple=None , lowerCamelCase : Any=None , lowerCamelCase : Optional[int]=None , **lowerCamelCase : Any ):
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
__lowercase = self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
if images is not None:
__lowercase = self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
if text is not None and images is not None:
__lowercase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase_ ) , tensor_type=UpperCAmelCase_ )
def _snake_case ( self : Dict , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Tuple ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
def _snake_case ( self : str , *lowerCamelCase : List[Any] , **lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
@property
def _snake_case ( self : Any ):
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
| 706
|
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
snake_case__ : Union[str, Any] = TypeVar("""T""")
snake_case__ : Optional[int] = TypeVar("""U""")
class DoubleLinkedListNode( Generic[T, U] ):
'''simple docstring'''
def __init__( self : Optional[int] , lowerCamelCase : T | None , lowerCamelCase : U | None ):
'''simple docstring'''
__lowercase = key
__lowercase = val
__lowercase = None
__lowercase = None
def __repr__( self : Any ):
'''simple docstring'''
return (
f"""Node: key: {self.key}, val: {self.val}, """
f"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class DoubleLinkedList( Generic[T, U] ):
'''simple docstring'''
def __init__( self : Dict ):
'''simple docstring'''
__lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase )
__lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase )
__lowercase , __lowercase = self.rear, self.head
def __repr__( self : Optional[Any] ):
'''simple docstring'''
__lowercase = ["DoubleLinkedList"]
__lowercase = self.head
while node.next is not None:
rep.append(str(lowerCamelCase ) )
__lowercase = node.next
rep.append(str(self.rear ) )
return ",\n ".join(lowerCamelCase )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : DoubleLinkedListNode[T, U] ):
'''simple docstring'''
__lowercase = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
__lowercase = node
__lowercase = previous
__lowercase = node
__lowercase = self.rear
def _snake_case ( self : Optional[int] , lowerCamelCase : DoubleLinkedListNode[T, U] ):
'''simple docstring'''
if node.prev is None or node.next is None:
return None
__lowercase = node.next
__lowercase = node.prev
__lowercase = None
__lowercase = None
return node
class LRUCache( Generic[T, U] ):
'''simple docstring'''
_snake_case : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self : List[Any] , lowerCamelCase : int ):
'''simple docstring'''
__lowercase = DoubleLinkedList()
__lowercase = capacity
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = {}
def __repr__( self : Optional[Any] ):
'''simple docstring'''
return (
f"""CacheInfo(hits={self.hits}, misses={self.miss}, """
f"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__( self : Dict , lowerCamelCase : T ):
'''simple docstring'''
return key in self.cache
def _snake_case ( self : List[Any] , lowerCamelCase : T ):
'''simple docstring'''
if key in self.cache:
self.hits += 1
__lowercase = self.cache[key]
__lowercase = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(lowerCamelCase )
return node.val
self.miss += 1
return None
def _snake_case ( self : Union[str, Any] , lowerCamelCase : T , lowerCamelCase : U ):
'''simple docstring'''
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
__lowercase = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(lowerCamelCase ) is not None
) # node guaranteed to be in list assert node.key is not None
del self.cache[first_node.key]
self.num_keys -= 1
__lowercase = DoubleLinkedListNode(lowerCamelCase , lowerCamelCase )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
__lowercase = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
__lowercase = value
self.list.add(lowerCamelCase )
@classmethod
def _snake_case ( cls : Union[str, Any] , lowerCamelCase : int = 128 ):
'''simple docstring'''
def cache_decorator_inner(lowerCamelCase : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*lowerCamelCase : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
__lowercase = LRUCache(lowerCamelCase )
__lowercase = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
__lowercase = func(*lowerCamelCase )
cls.decorator_function_to_instance_map[func].put(args[0] , lowerCamelCase )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(lowerCamelCase , "cache_info" , lowerCamelCase ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
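# Added note (hedged): in the reference implementation this snippet appears to
# be derived from, the final classmethod of LRUCache is exposed as
# `LRUCache.decorator(size)` and memoizes a function of one positional
# argument, e.g.
#     @LRUCache.decorator(100)
#     def fib(num):
#         return 1 if num in (1, 2) else fib(num - 1) + fib(num - 2)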
| 655
| 0
|
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCamelCase__ ( snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def _UpperCamelCase ( self ,A=0 ):
UpperCAmelCase = floats_tensor((1, 3, 128, 128) ,rng=random.Random(A ) )
UpperCAmelCase = np.random.RandomState(A )
UpperCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""strength""": 0.75,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _UpperCamelCase ( self ):
UpperCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase = self.get_dummy_inputs()
UpperCAmelCase = pipe(**A ).images
UpperCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _UpperCamelCase ( self ):
UpperCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" )
UpperCAmelCase = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=A )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase = self.get_dummy_inputs()
UpperCAmelCase = pipe(**A ).images
UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _UpperCamelCase ( self ):
UpperCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" )
UpperCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A )
# warmup pass to apply optimizations
UpperCAmelCase = pipe(**self.get_dummy_inputs() )
UpperCAmelCase = self.get_dummy_inputs()
UpperCAmelCase = pipe(**A ).images
UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _UpperCamelCase ( self ):
UpperCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" )
UpperCAmelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase = self.get_dummy_inputs()
UpperCAmelCase = pipe(**A ).images
UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _UpperCamelCase ( self ):
UpperCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" )
UpperCAmelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase = self.get_dummy_inputs()
UpperCAmelCase = pipe(**A ).images
UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _UpperCamelCase ( self ):
UpperCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" )
UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase = self.get_dummy_inputs()
UpperCAmelCase = pipe(**A ).images
UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
@property
def _UpperCamelCase ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _UpperCamelCase ( self ):
UpperCAmelCase = ort.SessionOptions()
UpperCAmelCase = False
return options
def _UpperCamelCase ( self ):
UpperCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
UpperCAmelCase = init_image.resize((768, 512) )
# using the PNDM scheduler by default
UpperCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" ,revision="""onnx""" ,safety_checker=A ,feature_extractor=A ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase = """A fantasy landscape, trending on artstation"""
UpperCAmelCase = np.random.RandomState(0 )
UpperCAmelCase = pipe(
prompt=A ,image=A ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=A ,output_type="""np""" ,)
UpperCAmelCase = output.images
UpperCAmelCase = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
UpperCAmelCase = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _UpperCamelCase ( self ):
UpperCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
UpperCAmelCase = init_image.resize((768, 512) )
UpperCAmelCase = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" ,subfolder="""scheduler""" ,revision="""onnx""" )
UpperCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" ,revision="""onnx""" ,scheduler=A ,safety_checker=A ,feature_extractor=A ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase = """A fantasy landscape, trending on artstation"""
UpperCAmelCase = np.random.RandomState(0 )
UpperCAmelCase = pipe(
prompt=A ,image=A ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=A ,output_type="""np""" ,)
UpperCAmelCase = output.images
UpperCAmelCase = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
UpperCAmelCase = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 341
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/config.json""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/config.json""",
"""funnel-transformer/medium-base""": """https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json""",
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/config.json""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json""",
"""funnel-transformer/xlarge-base""": """https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json""",
}
class lowerCamelCase__ ( snake_case ):
SCREAMING_SNAKE_CASE = '''funnel'''
SCREAMING_SNAKE_CASE = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
}
def __init__( self ,A=30_522 ,A=[4, 4, 4] ,A=None ,A=2 ,A=768 ,A=12 ,A=64 ,A=3_072 ,A="gelu_new" ,A=0.1 ,A=0.1 ,A=0.0 ,A=0.1 ,A=None ,A=1e-9 ,A="mean" ,A="relative_shift" ,A=True ,A=True ,A=True ,**A ,):
UpperCAmelCase = vocab_size
UpperCAmelCase = block_sizes
UpperCAmelCase = [1] * len(A ) if block_repeats is None else block_repeats
assert len(A ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
UpperCAmelCase = num_decoder_layers
UpperCAmelCase = d_model
UpperCAmelCase = n_head
UpperCAmelCase = d_head
UpperCAmelCase = d_inner
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout
UpperCAmelCase = attention_dropout
UpperCAmelCase = activation_dropout
UpperCAmelCase = initializer_range
UpperCAmelCase = initializer_std
UpperCAmelCase = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'''
UpperCAmelCase = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'''
UpperCAmelCase = attention_type
UpperCAmelCase = separate_cls
UpperCAmelCase = truncate_seq
UpperCAmelCase = pool_q_only
super().__init__(**A )
    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)
    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.")
    @property
    def num_blocks(self):
        return len(self.block_sizes)
    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
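# A minimal usage sketch for the config above. It assumes the class is the one
# exported as `transformers.FunnelConfig` (the block above is an obfuscated copy of
# it); the concrete values are illustrative only.
if __name__ == "__main__":
    from transformers import FunnelConfig
    config = FunnelConfig(block_sizes=[4, 4, 4], block_repeats=[1, 1, 1])
    print(config.num_blocks)         # 3, i.e. len(block_sizes)
    print(config.num_hidden_layers)  # 12, i.e. sum(block_sizes); read-only, set block_sizes instead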
UNIT_SYMBOL = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the factor (metre)
METRIC_CONVERSION = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 12,
"Pm": 15,
"Em": 18,
"Zm": 21,
"Ym": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert a length between metric units using the exponent difference."""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
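# Quick sanity checks for the converter above (here called length_conversion): moving
# to a larger unit divides by a power of ten, moving to a smaller one multiplies.
# math.isclose sidesteps float rounding in the comparisons.
import math
assert math.isclose(length_conversion(4, "meter", "kilometer"), 4e-3)
assert math.isclose(length_conversion(3, "gigametre", "meter"), 3e9)
assert math.isclose(length_conversion(1, "Meters", "megametre"), 1e-6)  # case and a trailing "s" are stripped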
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCamelCase__ = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")
    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)
    print(f"\n{mode.title()}ed message:")
    print(translated)
def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")
def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")
def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())
            # Only step the key forward when a letter was actually translated.
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main()
"""simple docstring"""
import string
def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output
def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence)
def benchmark() -> None:
    from timeit import timeit
    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
    # a string of zero length matches a pattern of zero length
    dp[0][0] = 1
    # a pattern of zero length will never match a string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # a string of zero length will match a pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # now using a bottom-up approach to fill in all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
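# A few worked cases for match_pattern above ("." matches any single character and
# "x*" matches zero or more repetitions of the element before the "*"):
assert match_pattern("aab", "c*a*b")          # "c*" matches zero c's, "a*" matches "aa"
assert not match_pattern("aaa", "aa")         # the whole input string must be consumed
assert match_pattern("abc", "a.c")            # "." stands in for "b"
assert not match_pattern("mississippi", "mis*is*p*.")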
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F"{input_string} matches the given pattern {pattern}")
else:
print(F"{input_string} does not match with the given pattern {pattern}")
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve the randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : str ) -> tuple[str, float]:
'''simple docstring'''
_snake_case = len([g for position, g in enumerate(UpperCamelCase__ ) if g == main_target[position]] )
return (item, float(UpperCamelCase__ ))
def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : str ) -> tuple[str, str]:
'''simple docstring'''
_snake_case = random.randint(0 , len(UpperCamelCase__ ) - 1 )
_snake_case = parent_a[:random_slice] + parent_a[random_slice:]
_snake_case = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : list[str] ) -> str:
'''simple docstring'''
_snake_case = list(UpperCamelCase__ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
_snake_case = random.choice(UpperCamelCase__ )
return "".join(UpperCamelCase__ )
def lowerCamelCase__ ( UpperCamelCase__ : tuple[str, float] , UpperCamelCase__ : list[tuple[str, float]] , UpperCamelCase__ : list[str] , ) -> list[str]:
'''simple docstring'''
_snake_case = []
# Generate more children proportionally to the fitness score.
_snake_case = int(parent_a[1] * 100 ) + 1
_snake_case = 10 if child_n >= 10 else child_n
for _ in range(UpperCamelCase__ ):
_snake_case = population_score[random.randint(0 , UpperCamelCase__ )][0]
_snake_case , _snake_case = crossover(parent_a[0] , UpperCamelCase__ )
# Append new string to the population list.
pop.append(mutate(UpperCamelCase__ , UpperCamelCase__ ) )
pop.append(mutate(UpperCamelCase__ , UpperCamelCase__ ) )
return pop
def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : list[str] , UpperCamelCase__ : bool = True ) -> tuple[int, int, str]:
'''simple docstring'''
if N_POPULATION < N_SELECTED:
_snake_case = F'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(UpperCamelCase__ )
# Verify that the target contains no genes besides the ones inside genes variable.
_snake_case = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_snake_case = F'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(UpperCamelCase__ )
# Generate random starting population.
_snake_case = []
for _ in range(UpperCamelCase__ ):
population.append(''.join([random.choice(UpperCamelCase__ ) for i in range(len(UpperCamelCase__ ) )] ) )
    # Just some logs to know what the algorithm is doing.
_snake_case , _snake_case = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(UpperCamelCase__ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
_snake_case = [evaluate(UpperCamelCase__ , UpperCamelCase__ ) for item in population]
# Check if there is a matching evolution.
_snake_case = sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x[1] , reverse=UpperCamelCase__ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
_snake_case = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(UpperCamelCase__ )
# Normalize population score to be between 0 and 1.
_snake_case = [
(item, score / len(UpperCamelCase__ )) for item, score in population_score
]
# This is selection
for i in range(UpperCamelCase__ ):
population.extend(select(population_score[int(UpperCamelCase__ )] , UpperCamelCase__ , UpperCamelCase__ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(UpperCamelCase__ ) > N_POPULATION:
break
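# The three genetic primitives above are hard to follow with placeholder names, so this
# is a readable restatement of the same ideas (positional fitness, single-point
# crossover, single-gene mutation). It is an illustrative sketch, not the exact code
# above; the function names are mine.
def evaluate_fitness(item: str, target: str) -> tuple[str, float]:
    # Fitness = number of characters already in the right position.
    score = len([g for position, g in enumerate(item) if g == target[position]])
    return (item, float(score))
def single_point_crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    cut = random.randint(0, len(parent_1) - 1)
    return (parent_1[:cut] + parent_2[cut:], parent_2[:cut] + parent_1[cut:])
def mutate_one_gene(child: str, genes: list[str], probability: float = MUTATION_PROBABILITY) -> str:
    # With the given probability, overwrite one randomly chosen position with a random gene.
    child_list = list(child)
    if random.uniform(0, 1) < probability:
        child_list[random.randint(0, len(child_list) - 1)] = random.choice(genes)
    return "".join(child_list)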
if __name__ == "__main__":
UpperCAmelCase_ = (
"""This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"""
)
UpperCAmelCase_ = list(
""" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"""
"""nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"""
)
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = basic(target_str, genes_list)
print(
F"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
)
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class UpperCamelCase_ :
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=10 , lowerCAmelCase_=3 , lowerCAmelCase_=2 , lowerCAmelCase_=2 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=10 , lowerCAmelCase_=0.02 , lowerCAmelCase_="divided_space_time" , lowerCAmelCase_=None , ) -> Optional[Any]:
_snake_case = parent
_snake_case = batch_size
_snake_case = image_size
_snake_case = num_channels
_snake_case = patch_size
_snake_case = num_frames
_snake_case = is_training
_snake_case = use_labels
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = attention_type
_snake_case = initializer_range
_snake_case = scope
_snake_case = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
_snake_case = (image_size // patch_size) ** 2
_snake_case = (num_frames) * self.num_patches_per_frame + 1
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.num_labels )
_snake_case = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self ) -> str:
_snake_case = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
_snake_case = self.num_labels
return config
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict:
_snake_case = TimesformerModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
_snake_case = TimesformerForVideoClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ )
# verify the logits shape
_snake_case = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Any:
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
lowerCAmelCase_ = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = TimesformerModelTester(self )
_snake_case = ConfigTester(
self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> Any:
_snake_case = copy.deepcopy(lowerCAmelCase_ )
if return_labels:
if model_class in get_values(lowerCAmelCase_ ):
_snake_case = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
return inputs_dict
def lowerCAmelCase ( self ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='TimeSformer does not use inputs_embeds' )
def lowerCAmelCase ( self ) -> Tuple:
pass
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowerCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase_ , nn.Linear ) )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self ) -> List[Any]:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = TimesformerModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Union[str, Any]:
if not self.has_attentions:
pass
else:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = True
for model_class in self.all_model_classes:
_snake_case = self.model_tester.seq_length
_snake_case = self.model_tester.num_frames
_snake_case = True
_snake_case = False
_snake_case = True
_snake_case = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_snake_case = outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_snake_case = outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
_snake_case = len(lowerCAmelCase_ )
# Check attention is always last and order is fine
_snake_case = True
_snake_case = True
_snake_case = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
self.assertEqual(out_len + 1 , len(lowerCAmelCase_ ) )
_snake_case = outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def lowerCAmelCase ( self ) -> Dict:
def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_snake_case = outputs.hidden_states
_snake_case = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
_snake_case = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCamelCase__ ( ) -> Tuple:
'''simple docstring'''
_snake_case = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
_snake_case = np.load(UpperCamelCase__ )
return list(UpperCamelCase__ )
@require_torch
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase ( self ) -> Dict:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase ( self ) -> Optional[int]:
_snake_case = TimesformerForVideoClassification.from_pretrained('facebook/timesformer-base-finetuned-k400' ).to(
lowerCAmelCase_ )
_snake_case = self.default_image_processor
_snake_case = prepare_video()
_snake_case = image_processor(video[:8] , return_tensors='pt' ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
_snake_case = model(**lowerCAmelCase_ )
# verify the logits
_snake_case = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
_snake_case = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) )
'''simple docstring'''
MOD_ADLER = 65521
def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of a string (same scheme as zlib.adler32)."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
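# Quick check against the reference implementation in the standard library: the
# Adler-32 of "Wikipedia" is the well-known value 0x11E60398.
import zlib
assert adler32("Wikipedia") == zlib.adler32(b"Wikipedia") == 300286872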
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = '''detr'''
__SCREAMING_SNAKE_CASE = ['''past_key_values''']
__SCREAMING_SNAKE_CASE = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=3 , lowerCamelCase=1_00 , lowerCamelCase=6 , lowerCamelCase=20_48 , lowerCamelCase=8 , lowerCamelCase=6 , lowerCamelCase=20_48 , lowerCamelCase=8 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase="relu" , lowerCamelCase=2_56 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.02 , lowerCamelCase=1.0 , lowerCamelCase=False , lowerCamelCase="sine" , lowerCamelCase="resnet50" , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=1 , lowerCamelCase=5 , lowerCamelCase=2 , lowerCamelCase=1 , lowerCamelCase=1 , lowerCamelCase=5 , lowerCamelCase=2 , lowerCamelCase=0.1 , **lowerCamelCase , ) -> int:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCamelCase : List[str] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(lowerCamelCase , lowerCamelCase ):
UpperCamelCase : Dict = backbone_config.get("model_type" )
UpperCamelCase : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase : Dict = config_class.from_dict(lowerCamelCase )
# set timm attributes to None
UpperCamelCase , UpperCamelCase , UpperCamelCase : int = None, None, None
UpperCamelCase : str = use_timm_backbone
UpperCamelCase : int = backbone_config
UpperCamelCase : List[Any] = num_channels
UpperCamelCase : int = num_queries
UpperCamelCase : List[Any] = d_model
UpperCamelCase : Union[str, Any] = encoder_ffn_dim
UpperCamelCase : Optional[int] = encoder_layers
UpperCamelCase : Tuple = encoder_attention_heads
UpperCamelCase : Tuple = decoder_ffn_dim
UpperCamelCase : int = decoder_layers
UpperCamelCase : Dict = decoder_attention_heads
UpperCamelCase : Optional[int] = dropout
UpperCamelCase : List[Any] = attention_dropout
UpperCamelCase : Tuple = activation_dropout
UpperCamelCase : Any = activation_function
UpperCamelCase : List[Any] = init_std
UpperCamelCase : List[Any] = init_xavier_std
UpperCamelCase : List[str] = encoder_layerdrop
UpperCamelCase : Optional[int] = decoder_layerdrop
UpperCamelCase : Any = encoder_layers
UpperCamelCase : List[Any] = auxiliary_loss
UpperCamelCase : Dict = position_embedding_type
UpperCamelCase : Union[str, Any] = backbone
UpperCamelCase : Tuple = use_pretrained_backbone
UpperCamelCase : Any = dilation
# Hungarian matcher
UpperCamelCase : List[str] = class_cost
UpperCamelCase : Optional[Any] = bbox_cost
UpperCamelCase : Optional[Any] = giou_cost
# Loss coefficients
UpperCamelCase : Optional[int] = mask_loss_coefficient
UpperCamelCase : Dict = dice_loss_coefficient
UpperCamelCase : Tuple = bbox_loss_coefficient
UpperCamelCase : Tuple = giou_loss_coefficient
UpperCamelCase : Optional[int] = eos_coefficient
super().__init__(is_encoder_decoder=lowerCamelCase , **lowerCamelCase )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
return self.d_model
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , lowerCamelCase , **lowerCamelCase ) -> Any:
'''simple docstring'''
return cls(backbone_config=lowerCamelCase , **lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict[str, any]:
'''simple docstring'''
UpperCamelCase : Tuple = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
UpperCamelCase : Tuple = self.backbone_config.to_dict()
UpperCamelCase : Dict = self.__class__.model_type
return output
class UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = version.parse('''1.11''' )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> float:
'''simple docstring'''
return 1e-5
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
return 12
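# A minimal usage sketch for the config above, assuming it is the class exported as
# `transformers.DetrConfig` (the block above is an obfuscated copy of it); the values
# shown are the defaults from the signature above, printed for illustration only.
if __name__ == "__main__":
    from transformers import DetrConfig
    detr_config = DetrConfig()
    print(detr_config.use_timm_backbone, detr_config.backbone)  # True resnet50
    print(detr_config.hidden_size)           # 256, aliased to d_model via attribute_map
    print(detr_config.num_attention_heads)   # 8, aliased to encoder_attention_heads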
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A_ : Tuple = {
"configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
"tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Tuple = [
"BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BioGptForCausalLM",
"BioGptForTokenClassification",
"BioGptForSequenceClassification",
"BioGptModel",
"BioGptPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
A_ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
A_ : List[str] = logging.get_logger(__name__)
A_ : Optional[Any] = "▁"
A_ : Optional[Any] = {"vocab_file": "sentencepiece.bpe.model"}
A_ : Dict = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
),
}
}
A_ : Optional[int] = {
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
A_ : Union[str, Any] = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = ['''input_ids''', '''attention_mask''']
lowerCamelCase__ = []
lowerCamelCase__ = []
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ):
# Mask token behave like a normal word, i.e. include the space before it
snake_case__ : Optional[Any] = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
snake_case__ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
snake_case__ : Union[str, Any] = legacy_behaviour
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
snake_case__ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) )
snake_case__ : int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
snake_case__ : Dict = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case__ : Union[str, Any] = 1
snake_case__ : List[Any] = len(self.sp_model )
snake_case__ : int = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__SCREAMING_SNAKE_CASE )
}
snake_case__ : Any = {v: k for k, v in self.lang_code_to_id.items()}
snake_case__ : List[str] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
snake_case__ : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
snake_case__ : Union[str, Any] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
snake_case__ : List[str] = src_lang if src_lang is not None else """eng_Latn"""
snake_case__ : Optional[int] = self.lang_code_to_id[self._src_lang]
snake_case__ : List[str] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
snake_case__ : List[str] = self.__dict__.copy()
snake_case__ : Any = None
snake_case__ : Tuple = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case__ : Any = {}
snake_case__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def __UpperCamelCase ( self ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __UpperCamelCase ( self ):
return self._src_lang
@src_lang.setter
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = [1] * len(self.prefix_tokens )
snake_case__ : Dict = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
snake_case__ : Dict = [self.sep_token_id]
snake_case__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
snake_case__ : List[Any] = src_lang
snake_case__ : Optional[Any] = self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tgt_lang_id
return inputs
def __UpperCamelCase ( self ):
snake_case__ : int = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case__ : Optional[int] = self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = """""".join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , """ """ ).strip()
return out_string
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
snake_case__ : int = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , """wb""" ) as fi:
snake_case__ : int = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "eng_Latn" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "fra_Latn" , **__SCREAMING_SNAKE_CASE , ):
snake_case__ : Optional[int] = src_lang
snake_case__ : Optional[Any] = tgt_lang
return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCamelCase ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
snake_case__ : Tuple = []
snake_case__ : int = [self.eos_token_id, self.cur_lang_code]
else:
snake_case__ : Optional[int] = [self.cur_lang_code]
snake_case__ : int = [self.eos_token_id]
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = self.lang_code_to_id[lang]
if self.legacy_behaviour:
snake_case__ : str = []
snake_case__ : int = [self.eos_token_id, self.cur_lang_code]
else:
snake_case__ : List[Any] = [self.cur_lang_code]
snake_case__ : Union[str, Any] = [self.eos_token_id]
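# A minimal usage sketch, assuming the class above is the one exported as
# `transformers.NllbTokenizer`. With the default legacy_behaviour=False, the source
# language code is prepended to the input ids and </s> is appended (see
# set_src_lang_special_tokens above). Checkpoint name and sentence are illustrative.
if __name__ == "__main__":
    from transformers import NllbTokenizer
    tokenizer = NllbTokenizer.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn")
    encoded = tokenizer("UN Chief says there is no military solution in Syria")
    tokens = tokenizer.convert_ids_to_tokens(encoded["input_ids"])
    print(tokens[0], tokens[-1])  # 'eng_Latn' '</s>'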
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __UpperCAmelCase ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
_snake_case : int = KandinskyImgaImgPipeline
_snake_case : List[Any] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image']
_snake_case : Union[str, Any] = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
]
_snake_case : Any = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
_snake_case : List[Any] = False
@property
def A ( self : Any )-> int:
return 32
@property
def A ( self : Tuple )-> List[str]:
return 32
@property
def A ( self : Optional[int] )-> Union[str, Any]:
return self.time_input_dim
@property
def A ( self : str )-> Optional[int]:
return self.time_input_dim * 4
@property
def A ( self : Optional[int] )-> List[str]:
return 1_00
@property
def A ( self : List[str] )-> Optional[Any]:
__UpperCamelCase = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def A ( self : Dict )-> Any:
torch.manual_seed(0 )
__UpperCamelCase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
__UpperCamelCase = MultilingualCLIP(A_ )
__UpperCamelCase = text_encoder.eval()
return text_encoder
@property
def A ( self : int )-> str:
torch.manual_seed(0 )
__UpperCamelCase = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
__UpperCamelCase = UNetaDConditionModel(**A_ )
return model
@property
def A ( self : List[str] )-> List[str]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A ( self : Optional[Any] )-> str:
torch.manual_seed(0 )
__UpperCamelCase = VQModel(**self.dummy_movq_kwargs )
return model
def A ( self : Union[str, Any] )-> str:
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = self.dummy_tokenizer
__UpperCamelCase = self.dummy_unet
__UpperCamelCase = self.dummy_movq
__UpperCamelCase = {
"num_train_timesteps": 10_00,
"beta_schedule": "linear",
"beta_start": 0.00_085,
"beta_end": 0.012,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
__UpperCamelCase = DDIMScheduler(**A_ )
__UpperCamelCase = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def A ( self : Union[str, Any] , A_ : Dict , A_ : int=0 )-> Optional[Any]:
__UpperCamelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(A_ ) ).to(A_ )
__UpperCamelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(A_ )
# create init_image
__UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ )
__UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__UpperCamelCase = Image.fromarray(np.uinta(A_ ) ).convert("RGB" ).resize((2_56, 2_56) )
if str(A_ ).startswith("mps" ):
__UpperCamelCase = torch.manual_seed(A_ )
else:
__UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
__UpperCamelCase = {
"prompt": "horse",
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def A ( self : Union[str, Any] )-> Dict:
__UpperCamelCase = "cpu"
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = self.pipeline_class(**A_ )
__UpperCamelCase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = pipe(**self.get_dummy_inputs(A_ ) )
__UpperCamelCase = output.images
__UpperCamelCase = pipe(
**self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0]
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase = np.array(
[0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def A ( self : Optional[int] )-> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Optional[int] )-> Optional[int]:
__UpperCamelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_img2img_frog.npy" )
__UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
__UpperCamelCase = "A red cartoon frog, 4k"
__UpperCamelCase = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(A_ )
__UpperCamelCase = KandinskyImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1" , torch_dtype=torch.floataa )
__UpperCamelCase = pipeline.to(A_ )
pipeline.set_progress_bar_config(disable=A_ )
__UpperCamelCase = torch.Generator(device="cpu" ).manual_seed(0 )
__UpperCamelCase , __UpperCamelCase = pipe_prior(
A_ , generator=A_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
__UpperCamelCase = pipeline(
A_ , image=A_ , image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type="np" , )
__UpperCamelCase = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(A_ , A_ )
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=snake_case__ )
class __UpperCAmelCase ( snake_case__ ):
"""simple docstring"""
_snake_case : str = field(default='summarization' , metadata={'include_in_asdict_even_if_is_default': True} )
_snake_case : ClassVar[Features] = Features({'text': Value('string' )} )
_snake_case : ClassVar[Features] = Features({'summary': Value('string' )} )
_snake_case : str = "text"
_snake_case : str = "summary"
@property
def A ( self : Any )-> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
"""simple docstring"""
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """Return True if the two strings are anagrams of each other."""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")
    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False
    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)
    # For each position, increment the count for the character from the first string
    # and decrement it for the character from the second string.
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values())
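# A couple of worked cases for check_anagrams above (case and spaces are ignored):
assert check_anagrams("Silent", "Listen")
assert check_anagrams("New York Times", "monkeys write")
assert not check_anagrams("There", "Their")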
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCamelCase = input("""Enter the first string """).strip()
UpperCamelCase = input("""Enter the second string """).strip()
UpperCamelCase = check_anagrams(input_a, input_b)
print(F"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(
    frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)
) -> IIRFilter:
    """Creates a peak filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(
    frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)
) -> IIRFilter:
    """Creates a low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(
    frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)
) -> IIRFilter:
    """Creates a high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
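

# Illustrative usage sketch (not part of the original module). It assumes the
# IIRFilter class from audio_filters.iir_filter exposes a per-sample process()
# method, as in the reference implementation this file accompanies.
if __name__ == "__main__":
    lowpass = make_lowpass(1_000, 48_000)  # 1 kHz cutoff at a 48 kHz sample rate
    # Run a 5 kHz tone through the filter; the tone lies well above the cutoff.
    filtered = [lowpass.process(sin(tau * 5_000 * n / 48_000)) for n in range(480)]
    print(filtered[:5])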
| 120
| 1
|
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class snake_case_ :
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=3 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=9_9 , lowerCamelCase_=3_2 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=3_7 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_1_2 , lowerCamelCase_=1_6 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=3 , lowerCamelCase_=4 , lowerCamelCase_=None , ) -> Union[str, Any]:
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_input_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_labels
UpperCamelCase = num_choices
UpperCamelCase = scope
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCamelCase = None
if self.use_input_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices)
UpperCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self) -> List[str]:
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=lowerCamelCase_ , )
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Optional[int]:
UpperCamelCase = FalconModel(config=lowerCamelCase_)
model.to(lowerCamelCase_)
model.eval()
UpperCamelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_)
UpperCamelCase = model(lowerCamelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> str:
UpperCamelCase = True
UpperCamelCase = FalconModel(lowerCamelCase_)
model.to(lowerCamelCase_)
model.eval()
UpperCamelCase = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , encoder_attention_mask=lowerCamelCase_ , )
UpperCamelCase = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , )
UpperCamelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> Tuple:
UpperCamelCase = FalconForCausalLM(config=lowerCamelCase_)
model.to(lowerCamelCase_)
model.eval()
UpperCamelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> Optional[Any]:
UpperCamelCase = True
UpperCamelCase = True
UpperCamelCase = FalconForCausalLM(config=lowerCamelCase_)
model.to(lowerCamelCase_)
model.eval()
# first forward pass
UpperCamelCase = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , encoder_attention_mask=lowerCamelCase_ , use_cache=lowerCamelCase_ , )
UpperCamelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size)
UpperCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1)
UpperCamelCase = torch.cat([input_mask, next_mask] , dim=-1)
UpperCamelCase = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , encoder_attention_mask=lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , )["""hidden_states"""][0]
UpperCamelCase = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , encoder_attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , )["""hidden_states"""][0]
# select random slice
UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1]).item()
UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3))
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = self.prepare_config_and_inputs()
(
UpperCamelCase
) = config_and_inputs
UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class snake_case_ ( a_ , a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A_ = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
A_ = (FalconForCausalLM,) if is_torch_available() else ()
A_ = (
{
"feature-extraction": FalconModel,
"text-classification": FalconForSequenceClassification,
"text-generation": FalconForCausalLM,
"question-answering": FalconForQuestionAnswering,
"token-classification": FalconForTokenClassification,
"zero-shot": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ = False
A_ = False
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = FalconModelTester(self)
UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=3_7)
def UpperCAmelCase__ ( self) -> Union[str, Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
UpperCamelCase = alibi
self.model_tester.create_and_check_model(lowerCamelCase_ , *lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = 3
UpperCamelCase = input_dict["""input_ids"""]
UpperCamelCase = input_ids.ne(1).to(lowerCamelCase_)
UpperCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
UpperCamelCase = FalconForSequenceClassification(lowerCamelCase_)
model.to(lowerCamelCase_)
model.eval()
UpperCamelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = 3
UpperCamelCase = """single_label_classification"""
UpperCamelCase = input_dict["""input_ids"""]
UpperCamelCase = input_ids.ne(1).to(lowerCamelCase_)
UpperCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
UpperCamelCase = FalconForSequenceClassification(lowerCamelCase_)
model.to(lowerCamelCase_)
model.eval()
UpperCamelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def UpperCAmelCase__ ( self) -> Dict:
UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = input_dict["""input_ids"""]
UpperCamelCase = FalconForCausalLM(lowerCamelCase_)
model.to(lowerCamelCase_)
model.eval()
UpperCamelCase = model(lowerCamelCase_ , use_cache=lowerCamelCase_)
UpperCamelCase = input_ids.shape[0]
UpperCamelCase = model._convert_to_rw_cache(result.past_key_values)
UpperCamelCase = model._convert_cache_to_standard_format(lowerCamelCase_ , lowerCamelCase_)
for layer in range(len(lowerCamelCase_)):
for tensor_idx in range(2):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx]))
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = 3
UpperCamelCase = """multi_label_classification"""
UpperCamelCase = input_dict["""input_ids"""]
UpperCamelCase = input_ids.ne(1).to(lowerCamelCase_)
UpperCamelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
UpperCamelCase = FalconForSequenceClassification(lowerCamelCase_)
model.to(lowerCamelCase_)
model.eval()
UpperCamelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def UpperCAmelCase__ ( self) -> Any:
for model_class in self.all_generative_model_classes:
UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(lowerCamelCase_ , '''use_cache'''):
return
UpperCamelCase = model_class(lowerCamelCase_).to(lowerCamelCase_)
if "use_cache" not in inputs:
UpperCamelCase = True
UpperCamelCase = model(**lowerCamelCase_)
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
UpperCamelCase = (
getattr(lowerCamelCase_ , '''decoder_layers''' , lowerCamelCase_)
or getattr(lowerCamelCase_ , '''num_decoder_layers''' , lowerCamelCase_)
or config.num_hidden_layers
)
UpperCamelCase = getattr(lowerCamelCase_ , '''num_kv_heads''' , config.num_attention_heads)
UpperCamelCase = getattr(lowerCamelCase_ , '''d_model''' , config.hidden_size)
UpperCamelCase = embed_dim // num_attention_heads
UpperCamelCase = outputs["""past_key_values"""]
self.assertEqual(len(lowerCamelCase_) , lowerCamelCase_)
UpperCamelCase = inputs["""input_ids"""].shape
for i in range(lowerCamelCase_):
if config.new_decoder_architecture:
UpperCamelCase = config.num_attention_heads
elif config.multi_query:
UpperCamelCase = 1
self.assertEqual(len(past_kv[0]) , 2) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
@require_torch
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''')
UpperCamelCase = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''')
model.eval()
model.to(lowerCamelCase_)
UpperCamelCase = tokenizer('''My favorite food is''' , return_tensors='''pt''').to(lowerCamelCase_)
UpperCamelCase = (
"""My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."""
)
UpperCamelCase = model.generate(**lowerCamelCase_ , do_sample=lowerCamelCase_ , max_new_tokens=1_9)
UpperCamelCase = tokenizer.batch_decode(lowerCamelCase_)[0]
self.assertEqual(lowerCamelCase_ , lowerCamelCase_)
@slow
def UpperCAmelCase__ ( self) -> int:
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
UpperCamelCase = AutoTokenizer.from_pretrained(lowerCamelCase_)
UpperCamelCase = FalconForCausalLM.from_pretrained(lowerCamelCase_)
model.eval()
model.to(lowerCamelCase_)
UpperCamelCase = tokenizer('''My favorite food is''' , return_tensors='''pt''').to(lowerCamelCase_)
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**lowerCamelCase_ , do_sample=lowerCamelCase_ , max_new_tokens=4)
model.generate(**lowerCamelCase_ , do_sample=lowerCamelCase_ , max_new_tokens=4)
model.generate(**lowerCamelCase_ , num_beams=2 , max_new_tokens=4)
@slow
def UpperCAmelCase__ ( self) -> str:
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
UpperCamelCase = AutoTokenizer.from_pretrained(lowerCamelCase_)
UpperCamelCase = FalconForCausalLM.from_pretrained(lowerCamelCase_)
model.eval()
model.to(device=lowerCamelCase_)
UpperCamelCase = tokenizer('''My favorite food is''' , return_tensors='''pt''').to(lowerCamelCase_)
# Test results are the same with and without cache
UpperCamelCase = model.generate(**lowerCamelCase_ , do_sample=lowerCamelCase_ , max_new_tokens=2_0 , use_cache=lowerCamelCase_)
UpperCamelCase = model.generate(**lowerCamelCase_ , do_sample=lowerCamelCase_ , max_new_tokens=2_0 , use_cache=lowerCamelCase_)
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
| 34
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def get_config(model_name: str) -> BitConfig:
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1_000,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def snake_case_ (__A : Dict ) -> str:
if "stem.conv" in name:
__lowerCAmelCase : List[str] = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
__lowerCAmelCase : str = name.replace("""blocks""" , """layers""" )
if "head.fc" in name:
__lowerCAmelCase : str = name.replace("""head.fc""" , """classifier.1""" )
if name.startswith("""norm""" ):
__lowerCAmelCase : List[Any] = """bit.""" + name
if "bit" not in name and "classifier" not in name:
__lowerCAmelCase : Optional[int] = """bit.encoder.""" + name
return name
def snake_case_ () -> Optional[int]:
__lowerCAmelCase : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowerCAmelCase : str = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def snake_case_ (__A : Optional[int] , __A : List[str] , __A : Any=False ) -> str:
__lowerCAmelCase : int = get_config(__A )
# load original model from timm
__lowerCAmelCase : Any = create_model(__A , pretrained=__A )
timm_model.eval()
# load state_dict of original model
__lowerCAmelCase : List[Any] = timm_model.state_dict()
for key in state_dict.copy().keys():
__lowerCAmelCase : Dict = state_dict.pop(__A )
__lowerCAmelCase : str = val.squeeze() if """head""" in key else val
# load HuggingFace model
__lowerCAmelCase : Dict = BitForImageClassification(__A )
model.eval()
model.load_state_dict(__A )
# create image processor
__lowerCAmelCase : Optional[int] = create_transform(**resolve_data_config({} , model=__A ) )
__lowerCAmelCase : Optional[Any] = transform.transforms
__lowerCAmelCase : List[Any] = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
__lowerCAmelCase : int = BitImageProcessor(
do_resize=__A , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__A , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=__A , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
__lowerCAmelCase : str = prepare_img()
__lowerCAmelCase : str = transform(__A ).unsqueeze(0 )
__lowerCAmelCase : Optional[int] = processor(__A , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(__A , __A )
# verify logits
with torch.no_grad():
__lowerCAmelCase : Dict = model(__A )
__lowerCAmelCase : List[Any] = outputs.logits
print("""Logits:""" , logits[0, :3] )
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
__lowerCAmelCase : List[Any] = timm_model(__A )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__A , outputs.logits , atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(__A ).mkdir(exist_ok=__A )
print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(__A )
processor.save_pretrained(__A )
if push_to_hub:
print(f'''Pushing model {model_name} and processor to the hub''' )
model.push_to_hub(f'''ybelkada/{model_name}''' )
processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
__UpperCAmelCase = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
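    # Example invocation (illustrative; substitute the actual filename this
    # conversion script is saved under):
    #   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
    #       --pytorch_dump_folder_path ./resnetv2_50x1_bitm --push_to_hub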
| 651
| 0
|
'''simple docstring'''
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph: list, s: int, t: int, parent: list) -> bool:
    """Breadth-first search: returns True if an augmenting path from s to t exists."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph: list, source: int, sink: int) -> list:
    """Returns the edges saturated by a maximum flow; note that `graph` is modified in place."""
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record the original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
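

# Companion sketch (illustrative, not part of the original module): mincut()
# above mutates `graph` in place and returns the saturated edges. The same
# BFS-based augmenting-path search (the Edmonds-Karp form of Ford-Fulkerson)
# can instead return the max-flow value, which by the max-flow min-cut theorem
# equals the capacity of a minimum cut (23 for the unmodified test_graph).
def edmonds_karp_max_flow(capacity: list, source: int, sink: int) -> float:
    graph = [row[:] for row in capacity]  # work on a copy of the capacities
    flow = 0
    while True:
        parent = [-1] * len(graph)
        parent[source] = source
        queue = [source]
        while queue:  # BFS for an augmenting path in the residual graph
            u = queue.pop(0)
            for v, cap in enumerate(graph[u]):
                if parent[v] == -1 and cap > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:  # no augmenting path left: the flow is maximal
            return flow
        path_flow = float("Inf")
        v = sink
        while v != source:  # bottleneck capacity along the path
            path_flow = min(path_flow, graph[parent[v]][v])
            v = parent[v]
        v = sink
        while v != source:  # update residual capacities
            graph[parent[v]][v] -= path_flow
            graph[v][parent[v]] += path_flow
            v = parent[v]
        flow += path_flow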
| 461
|
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ : List[Any] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( __lowerCamelCase , unittest.TestCase ):
lowerCamelCase_ =ReformerTokenizer
lowerCamelCase_ =ReformerTokenizerFast
lowerCamelCase_ =True
lowerCamelCase_ =False
lowerCamelCase_ =True
def __UpperCAmelCase ( self : Optional[Any]) -> Optional[Any]:
super().setUp()
lowercase_ = ReformerTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase)
tokenizer.save_pretrained(self.tmpdirname)
def __UpperCAmelCase ( self : int) -> int:
lowercase_ = "<s>"
lowercase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase) , __lowerCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase) , __lowerCAmelCase)
def __UpperCAmelCase ( self : Tuple) -> Optional[int]:
lowercase_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , "<unk>")
self.assertEqual(vocab_keys[1] , "<s>")
self.assertEqual(vocab_keys[-1] , "j")
self.assertEqual(len(__lowerCAmelCase) , 1000)
def __UpperCAmelCase ( self : Union[str, Any]) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1000)
def __UpperCAmelCase ( self : List[Any]) -> List[Any]:
if not self.test_rust_tokenizer:
return
lowercase_ = self.get_tokenizer()
lowercase_ = self.get_rust_tokenizer()
lowercase_ = "I was born in 92000, and this is falsé."
lowercase_ = tokenizer.tokenize(__lowerCAmelCase)
lowercase_ = rust_tokenizer.tokenize(__lowerCAmelCase)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
lowercase_ = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
lowercase_ = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
lowercase_ = self.get_rust_tokenizer()
lowercase_ = tokenizer.encode(__lowerCAmelCase)
lowercase_ = rust_tokenizer.encode(__lowerCAmelCase)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
def __UpperCAmelCase ( self : Tuple , __lowerCAmelCase : List[str]=15) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
lowercase_ = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
# Simple input
lowercase_ = "This is a simple input"
lowercase_ = ["This is a simple input 1", "This is a simple input 2"]
lowercase_ = ("This is a simple input", "This is a pair")
lowercase_ = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="max_length")
# Simple input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="max_length")
# Simple input
self.assertRaises(
__lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="max_length" , )
# Pair input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="max_length")
# Pair input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="max_length")
# Pair input
self.assertRaises(
__lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="max_length" , )
def __UpperCAmelCase ( self : Tuple) -> Tuple:
pass
def __UpperCAmelCase ( self : Any) -> Optional[Any]:
lowercase_ = ReformerTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase)
lowercase_ = tokenizer.tokenize("This is a test")
self.assertListEqual(__lowerCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase) , [285, 46, 10, 170, 382] , )
lowercase_ = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowercase_ = tokenizer.convert_tokens_to_ids(__lowerCAmelCase)
self.assertListEqual(
__lowerCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowercase_ = tokenizer.convert_ids_to_tokens(__lowerCAmelCase)
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def __UpperCAmelCase ( self : List[str]) -> Any:
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
@slow
def __UpperCAmelCase ( self : Tuple) -> str:
lowercase_ = "Hello World!"
lowercase_ = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase))
@slow
def __UpperCAmelCase ( self : Any) -> Dict:
lowercase_ = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
lowercase_ = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase))
@require_torch
@slow
def __UpperCAmelCase ( self : Any) -> Tuple:
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
lowercase_ = list(self.big_tokenizer.get_vocab().keys())[:10]
lowercase_ = " ".join(__lowerCAmelCase)
lowercase_ = self.big_tokenizer.encode_plus(__lowerCAmelCase , return_tensors="pt")
lowercase_ = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt")
lowercase_ = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
lowercase_ = encoded_sequence["input_ids"].shape
lowercase_ = ReformerModel(__lowerCAmelCase)
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__lowerCAmelCase)
model(**__lowerCAmelCase)
@slow
def __UpperCAmelCase ( self : List[Any]) -> Tuple:
# fmt: off
lowercase_ = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
lowercase_ = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=__lowerCAmelCase , sequences=__lowerCAmelCase , )
| 461
| 1
|
"""simple docstring"""
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    """Prints every permutation of `sequence` using backtracking."""
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Recursively builds the state-space tree, printing each complete permutation."""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
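
# Quick cross-check (illustrative): the backtracking above enumerates every
# ordering exactly once, so the counts should match the standard library.
from itertools import permutations

assert len(list(permutations([3, 1, 2, 4]))) == 24  # 4! orderings
assert len(list(permutations(["A", "B", "C"]))) == 6  # 3! orderings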
| 179
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

_UpperCamelCase = logging.get_logger(__name__)
logger = _UpperCamelCase

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
    },
    'tokenizer_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'camembert-base': 512,
}

SPIECE_UNDERLINE = '▁'
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
"""simple docstring"""
__snake_case : List[str] = VOCAB_FILES_NAMES
__snake_case : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case : List[str] = ["""input_ids""", """attention_mask"""]
__snake_case : List[Any] = CamembertTokenizer
def __init__( self :List[Any] , __lowercase :Optional[int]=None , __lowercase :str=None , __lowercase :Optional[Any]="<s>" , __lowercase :List[str]="</s>" , __lowercase :Tuple="</s>" , __lowercase :int="<s>" , __lowercase :Union[str, Any]="<unk>" , __lowercase :Optional[int]="<pad>" , __lowercase :Union[str, Any]="<mask>" , __lowercase :Tuple=["<s>NOTUSED", "</s>NOTUSED"] , **__lowercase :List[str] , ):
# Mask token behave like a normal word, i.e. include the space before it
__lowerCamelCase : Optional[int] =AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
super().__init__(
__lowercase , tokenizer_file=__lowercase , bos_token=__lowercase , eos_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , unk_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , additional_special_tokens=__lowercase , **__lowercase , )
__lowerCamelCase : Any =vocab_file
__lowerCamelCase : Union[str, Any] =False if not self.vocab_file else True
def __lowercase ( self :List[str] , __lowercase :List[int] , __lowercase :Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCamelCase : Tuple =[self.cls_token_id]
__lowerCamelCase : str =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowercase ( self :Tuple , __lowercase :List[int] , __lowercase :Optional[List[int]] = None ):
__lowerCamelCase : Any =[self.sep_token_id]
__lowerCamelCase : str =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowercase ( self :List[Any] , __lowercase :str , __lowercase :Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__lowerCamelCase : List[str] =os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ):
copyfile(self.vocab_file , __lowercase )
return (out_vocab_file,)
| 179
| 1
|
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6_378_137
def lamberts_ellipsoidal_distance(
    lat1: float, lon1: float, lat2: float, lon2: float
) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
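
    # Sanity check (illustrative): the flattening implied by AXIS_A and AXIS_B
    # should match the defining WGS-84 inverse flattening of 298.257223563.
    assert abs(1 / ((AXIS_A - AXIS_B) / AXIS_A) - 298.257223563) < 1e-3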
| 719
|
def solution(length: int = 50) -> int:
    """Counts the ways to fill a row of `length` units with blocks of length at
    least three that are separated by at least one empty unit (cf. Project
    Euler problem 114)."""
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
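
    # Sanity check (illustrative): a row measuring seven units can be filled in
    # exactly 17 ways, the worked example from Project Euler problem 114.
    assert solution(7) == 17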
| 45
| 0
|
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
| 162
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 67
| 0
|
def odd_even_transposition(arr: list) -> list:
    """Sorts `arr` in place with odd-even transposition (brick) sort and returns it."""
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
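
    # Quick check (illustrative): n passes of the odd-even exchange sort n
    # elements, so the result should agree with the built-in sort.
    assert odd_even_transposition(list(range(10, 0, -1))) == sorted(range(10, 0, -1))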
| 111
|
import re
def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
__import__("""doctest""").testmod()
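
    # Illustrative examples (behaviour follows from the regex splitter above):
    #   to_pascal_case("hello world")             -> "HelloWorld"
    #   to_camel_case("hello world")              -> "helloWorld"
    #   to_snake_case("hello world", upper=False) -> "hello_world"
    #   to_kebab_case("hello world", upper=True)  -> "HELLO-WORLD"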
| 111
| 1
|
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '''\
'''
_DESCRIPTION = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
_KWARGS_DESCRIPTION = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE( datasets.Metric ):
def __lowerCamelCase ( self : List[str] ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'input_texts': datasets.Value('string' ),
} ) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , )
def __lowerCamelCase ( self : Optional[int] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int = 16 , UpperCamelCase_ : bool = True , UpperCamelCase_ : List[str]=None ) -> Tuple:
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
SCREAMING_SNAKE_CASE__ :Any = """cuda"""
else:
SCREAMING_SNAKE_CASE__ :Any = """cuda""" if torch.cuda.is_available() else """cpu"""
SCREAMING_SNAKE_CASE__ :Optional[Any] = AutoModelForCausalLM.from_pretrained(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Dict = model.to(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :int = AutoTokenizer.from_pretrained(UpperCamelCase_ )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
SCREAMING_SNAKE_CASE__ :List[Any] = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(UpperCamelCase_ ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
SCREAMING_SNAKE_CASE__ :str = model.config.max_length - 1
else:
SCREAMING_SNAKE_CASE__ :str = model.config.max_length
SCREAMING_SNAKE_CASE__ :Any = tokenizer(
UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors='pt' , return_attention_mask=UpperCamelCase_ , ).to(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :List[str] = encodings["""input_ids"""]
SCREAMING_SNAKE_CASE__ :List[Any] = encodings["""attention_mask"""]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
SCREAMING_SNAKE_CASE__ :Optional[int] = []
SCREAMING_SNAKE_CASE__ :Optional[int] = CrossEntropyLoss(reduction='none' )
for start_index in logging.tqdm(range(0 , len(UpperCamelCase_ ) , UpperCamelCase_ ) ):
SCREAMING_SNAKE_CASE__ :int = min(start_index + batch_size , len(UpperCamelCase_ ) )
SCREAMING_SNAKE_CASE__ :Dict = encoded_texts[start_index:end_index]
SCREAMING_SNAKE_CASE__ :Union[str, Any] = attn_masks[start_index:end_index]
if add_start_token:
SCREAMING_SNAKE_CASE__ :Optional[int] = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Any = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
SCREAMING_SNAKE_CASE__ :List[str] = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(UpperCamelCase_), attn_mask], dim=1)
SCREAMING_SNAKE_CASE__ :Tuple = encoded_batch
with torch.no_grad():
SCREAMING_SNAKE_CASE__ :Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ ).logits
SCREAMING_SNAKE_CASE__ :List[Any] = out_logits[..., :-1, :].contiguous()
SCREAMING_SNAKE_CASE__ :Union[str, Any] = labels[..., 1:].contiguous()
SCREAMING_SNAKE_CASE__ :int = attn_mask[..., 1:].contiguous()
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), UpperCamelCase_) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1))
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(UpperCamelCase_ )}
| 209
|
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ ( _UpperCamelCase ):
"""simple docstring"""
__magic_name__ : int = ['input_ids', 'attention_mask']
def __init__( self : List[Any] , lowerCAmelCase : Union[str, Any]="</s>" , lowerCAmelCase : Tuple="<unk>" , lowerCAmelCase : Union[str, Any]="<pad>" , lowerCAmelCase : Tuple=125 , lowerCAmelCase : str=None , **lowerCAmelCase : Optional[Any] , ) -> None:
"""simple docstring"""
if extra_ids > 0 and additional_special_tokens is None:
__UpperCamelCase : str = [F'''<extra_id_{i}>''' for i in range(lowerCAmelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
__UpperCamelCase : Tuple = len(set(filter(lambda lowerCAmelCase : bool("""extra_id""" in str(lowerCAmelCase ) ) , lowerCAmelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
""" provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"""
""" extra_ids tokens""" )
__UpperCamelCase : Tuple = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else pad_token
__UpperCamelCase : Tuple = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else eos_token
__UpperCamelCase : int = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else unk_token
super().__init__(
eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , extra_ids=lowerCAmelCase , additional_special_tokens=lowerCAmelCase , **lowerCAmelCase , )
__UpperCamelCase : Any = extra_ids
__UpperCamelCase : List[Any] = 2**8 # utf is 8 bits
# define special tokens dict
__UpperCamelCase : Dict[int, str] = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
__UpperCamelCase : int = len(self.special_tokens_encoder )
__UpperCamelCase : Tuple = len(lowerCAmelCase )
for i, token in enumerate(lowerCAmelCase ):
__UpperCamelCase : Optional[int] = self.vocab_size + i - n
__UpperCamelCase : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def lowerCamelCase__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None , lowerCAmelCase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowerCAmelCase )) + [1]
return ([0] * len(lowerCAmelCase )) + [1] + ([0] * len(lowerCAmelCase )) + [1]
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : List[int] ) -> List[int]:
"""simple docstring"""
if len(lowerCAmelCase ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
""" eos tokens being added.""" )
return token_ids
else:
return token_ids + [self.eos_token_id]
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__UpperCamelCase : Any = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__UpperCamelCase : List[str] = self._add_eos_if_not_present(lowerCAmelCase )
if token_ids_a is None:
return token_ids_a
else:
__UpperCamelCase : List[Any] = self._add_eos_if_not_present(lowerCAmelCase )
return token_ids_a + token_ids_a
def lowerCamelCase__ ( self : str , lowerCAmelCase : str ) -> List[str]:
"""simple docstring"""
        tokens = [chr(i) for i in text.encode("utf-8")]
return tokens
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : Optional[int] ) -> int:
"""simple docstring"""
if token in self.special_tokens_encoder:
__UpperCamelCase : Dict = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
__UpperCamelCase : Tuple = self.added_tokens_encoder[token]
elif len(lowerCAmelCase ) != 1:
__UpperCamelCase : Dict = self.unk_token_id
else:
__UpperCamelCase : Union[str, Any] = ord(lowerCAmelCase ) + self._num_special_tokens
return token_id
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : Dict ) -> Optional[Any]:
"""simple docstring"""
if index in self.special_tokens_decoder:
__UpperCamelCase : str = self.special_tokens_decoder[index]
else:
__UpperCamelCase : Any = chr(index - self._num_special_tokens )
return token
def lowerCamelCase__ ( self : str , lowerCAmelCase : List[Any] ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase : Union[str, Any] = b""""""
for token in tokens:
if token in self.special_tokens_decoder:
__UpperCamelCase : Optional[Any] = self.special_tokens_decoder[token].encode("""utf-8""" )
elif token in self.added_tokens_decoder:
__UpperCamelCase : Optional[int] = self.special_tokens_decoder[token].encode("""utf-8""" )
elif token in self.special_tokens_encoder:
__UpperCamelCase : Tuple = token.encode("""utf-8""" )
elif token in self.added_tokens_encoder:
__UpperCamelCase : Tuple = token.encode("""utf-8""" )
else:
__UpperCamelCase : Union[str, Any] = bytes([ord(lowerCAmelCase )] )
bstring += tok_string
__UpperCamelCase : Optional[int] = bstring.decode("""utf-8""" , errors="""ignore""" )
return string
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
return ()
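
# Illustrative round trip (follows from the byte-level mapping above: ids 0-2
# are reserved for pad/eos/unk, so raw utf-8 bytes are shifted by that offset):
#   "hi" -> tokens ["h", "i"] -> ids [ord("h") + 3, ord("i") + 3] == [107, 108]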
| 279
| 0
|
'''simple docstring'''
import sys
def matrix_chain_order(array):
    """Dynamic-programming solution of the matrix-chain multiplication problem."""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    """Prints the optimal parenthesization recursively."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operations required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
| 713
|
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day():
    """Return today's quote from the zenquotes API."""
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes():
    """Return a random quote from the zenquotes API."""
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 131
| 0
|
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images
    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
    classifier.save("cnn.h5")

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
| 330
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> int:
'''simple docstring'''
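    # Walk down the HF module hierarchy along the dotted key, then copy the checkpoint tensor
    # into the matching weight/bias parameter (checked against the expected shape below).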
for attribute in key.split("." ):
_A = getattr(__lowercase , __lowercase )
if weight_type is not None:
_A = getattr(__lowercase , __lowercase ).shape
else:
_A = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
_A = value
elif weight_type == "weight_g":
_A = value
elif weight_type == "weight_v":
_A = value
elif weight_type == "bias":
_A = value
else:
_A = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def __lowercase ( __lowercase , __lowercase , __lowercase ) -> str:
'''simple docstring'''
_A = []
_A = fairseq_model.state_dict()
_A = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_A = False
if "conv_layers" in name:
load_conv_layer(
__lowercase , __lowercase , __lowercase , __lowercase , hf_model.config.feat_extract_norm == "group" , )
_A = True
else:
for key, mapped_key in MAPPING.items():
_A = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
if key in name or (key.split("w2v_model." )[-1] == name.split("." )[0] and not is_finetuned):
_A = True
if "*" in mapped_key:
_A = name.split(__lowercase )[0].split("." )[-2]
_A = mapped_key.replace("*" , __lowercase )
if "weight_g" in name:
_A = "weight_g"
elif "weight_v" in name:
_A = "weight_v"
elif "weight" in name:
_A = "weight"
elif "bias" in name:
_A = "bias"
else:
_A = None
set_recursively(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
continue
if not is_used:
unused_weights.append(__lowercase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Tuple:
'''simple docstring'''
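    # `full_name` looks like "...conv_layers.<layer_id>.<type_id>...": the two indices decide whether the
    # tensor is a conv weight/bias or a layer-norm parameter of that conv layer.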
_A = full_name.split("conv_layers." )[-1]
_A = name.split("." )
_A = int(items[0] )
_A = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_A = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_A = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
_A = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
_A = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__lowercase )
@torch.no_grad()
def __lowercase ( __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=True ) -> int:
'''simple docstring'''
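    # Build the HF config (optionally from --config_path), create the tokenizer/feature extractor for
    # fine-tuned checkpoints, then load the fairseq model and copy its weights into the HF model.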
if config_path is not None:
_A = HubertConfig.from_pretrained(__lowercase )
else:
_A = HubertConfig()
if is_finetuned:
if dict_path:
_A = Dictionary.load(__lowercase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_A = target_dict.pad_index
_A = target_dict.bos_index
_A = target_dict.eos_index
_A = len(target_dict.symbols )
_A = os.path.join(__lowercase , "vocab.json" )
if not os.path.isdir(__lowercase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(__lowercase ) )
return
os.makedirs(__lowercase , exist_ok=__lowercase )
with open(__lowercase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(target_dict.indices , __lowercase )
_A = WavaVecaCTCTokenizer(
__lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=__lowercase , )
_A = True if config.feat_extract_norm == "layer" else False
_A = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__lowercase , return_attention_mask=__lowercase , )
_A = WavaVecaProcessor(feature_extractor=__lowercase , tokenizer=__lowercase )
processor.save_pretrained(__lowercase )
_A = HubertForCTC(__lowercase )
else:
_A = HubertModel(__lowercase )
if is_finetuned:
_A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
_A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_A = model[0].eval()
recursively_load_weights(__lowercase , __lowercase , __lowercase )
hf_wavavec.save_pretrained(__lowercase )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
lowerCamelCase_ = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 330
| 1
|
'''simple docstring'''
from datetime import datetime
import requests
def download_video(url):
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 703
|
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE__ ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ['''image_processor''', '''tokenizer''']
SCREAMING_SNAKE_CASE = '''BlipImageProcessor'''
SCREAMING_SNAKE_CASE = ('''BertTokenizer''', '''BertTokenizerFast''')
    def __init__(self, image_processor, tokenizer):
        """Wrap an image processor and a tokenizer into a single processor."""
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
def __call__( self : Optional[int] , UpperCAmelCase_ : ImageInput = None , UpperCAmelCase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase_ : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
__UpperCAmelCase : Optional[int] = self.tokenizer
__UpperCAmelCase : Dict = self.tokenizer(
text=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , )
return text_encoding
# add pixel_values
__UpperCAmelCase : Dict = self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ )
if text is not None:
__UpperCAmelCase : Any = self.tokenizer(
text=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , )
else:
__UpperCAmelCase : str = None
if text_encoding is not None:
encoding_image_processor.update(UpperCAmelCase_ )
return encoding_image_processor
def lowerCamelCase_ ( self : Tuple , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : int ):
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
def lowerCamelCase_ ( self : Any , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Tuple ):
"""simple docstring"""
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
@property
    def model_input_names(self):
        """Combine the tokenizer and image processor input names, preserving order and dropping duplicates."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 329
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    """Configuration class that stores the configuration of a PEGASUS model."""

    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=0, scale_embedding=False, pad_token_id=0, eos_token_id=1, forced_eos_token_id=1, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
| 488
|
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCamelCase ( A,unittest.TestCase ):
'''simple docstring'''
a_ : Union[str, Any] = CodeGenTokenizer
a_ : str = CodeGenTokenizerFast
a_ : int = True
a_ : str = {"add_prefix_space": True}
a_ : Optional[int] = False
def _snake_case ( self : int ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowerCamelCase : List[Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
__lowerCamelCase : Union[str, Any] = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
__lowerCamelCase : Tuple = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__lowerCamelCase : Tuple = {"""unk_token""": """<unk>"""}
__lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_lowerCamelCase ) )
def _snake_case ( self : int , **_lowerCamelCase : List[str] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _snake_case ( self : Union[str, Any] , **_lowerCamelCase : List[str] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _snake_case ( self : str , _lowerCamelCase : List[Any] ):
'''simple docstring'''
__lowerCamelCase : List[str] = """lower newer"""
__lowerCamelCase : Tuple = """lower newer"""
return input_text, output_text
def _snake_case ( self : str ):
'''simple docstring'''
__lowerCamelCase : Tuple = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowerCamelCase : Optional[Any] = """lower newer"""
__lowerCamelCase : Optional[int] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
__lowerCamelCase : List[str] = tokenizer.tokenize(_lowerCamelCase , add_prefix_space=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
__lowerCamelCase : Tuple = tokens + [tokenizer.unk_token]
__lowerCamelCase : Optional[int] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase )
def _snake_case ( self : int ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__lowerCamelCase : List[str] = self.get_tokenizer()
__lowerCamelCase : Tuple = self.get_rust_tokenizer(add_prefix_space=_lowerCamelCase )
__lowerCamelCase : Tuple = """lower newer"""
# Testing tokenization
__lowerCamelCase : Any = tokenizer.tokenize(_lowerCamelCase , add_prefix_space=_lowerCamelCase )
__lowerCamelCase : Tuple = rust_tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
# Testing conversion to ids without special tokens
__lowerCamelCase : List[Any] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase , add_prefix_space=_lowerCamelCase )
__lowerCamelCase : Tuple = rust_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
# Testing conversion to ids with special tokens
__lowerCamelCase : List[str] = self.get_rust_tokenizer(add_prefix_space=_lowerCamelCase )
__lowerCamelCase : List[str] = tokenizer.encode(_lowerCamelCase , add_prefix_space=_lowerCamelCase )
__lowerCamelCase : Optional[Any] = rust_tokenizer.encode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
# Testing the unknown token
__lowerCamelCase : Any = tokens + [rust_tokenizer.unk_token]
__lowerCamelCase : str = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase )
def _snake_case ( self : List[Any] , *_lowerCamelCase : Union[str, Any] , **_lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
pass
def _snake_case ( self : Optional[int] , _lowerCamelCase : Dict=1_5 ):
'''simple docstring'''
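        # The tokenizer defines no padding token by default, so every padding request below must raise.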
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowerCamelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
# Simple input
__lowerCamelCase : Optional[int] = """This is a simple input"""
__lowerCamelCase : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
__lowerCamelCase : str = ("""This is a simple input""", """This is a pair""")
__lowerCamelCase : Any = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(_lowerCamelCase , tokenizer_r.encode , _lowerCamelCase , max_length=_lowerCamelCase , padding="""max_length""" )
# Simple input
self.assertRaises(_lowerCamelCase , tokenizer_r.encode_plus , _lowerCamelCase , max_length=_lowerCamelCase , padding="""max_length""" )
# Simple input
self.assertRaises(
_lowerCamelCase , tokenizer_r.batch_encode_plus , _lowerCamelCase , max_length=_lowerCamelCase , padding="""max_length""" , )
# Pair input
self.assertRaises(_lowerCamelCase , tokenizer_r.encode , _lowerCamelCase , max_length=_lowerCamelCase , padding="""max_length""" )
# Pair input
self.assertRaises(_lowerCamelCase , tokenizer_r.encode_plus , _lowerCamelCase , max_length=_lowerCamelCase , padding="""max_length""" )
# Pair input
self.assertRaises(
_lowerCamelCase , tokenizer_r.batch_encode_plus , _lowerCamelCase , max_length=_lowerCamelCase , padding="""max_length""" , )
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowerCamelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
__lowerCamelCase : Dict = """This is a simple input"""
__lowerCamelCase : str = ["""This is a simple input looooooooong""", """This is a simple input"""]
__lowerCamelCase : Tuple = ("""This is a simple input""", """This is a pair""")
__lowerCamelCase : List[Any] = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
__lowerCamelCase : List[Any] = tokenizer.pad_token_id
__lowerCamelCase : Tuple = tokenizer(_lowerCamelCase , padding="""max_length""" , max_length=3_0 , return_tensors="""np""" )
__lowerCamelCase : Union[str, Any] = tokenizer(_lowerCamelCase , padding=_lowerCamelCase , truncate=_lowerCamelCase , return_tensors="""np""" )
__lowerCamelCase : Optional[int] = tokenizer(*_lowerCamelCase , padding="""max_length""" , max_length=6_0 , return_tensors="""np""" )
__lowerCamelCase : Dict = tokenizer(_lowerCamelCase , padding=_lowerCamelCase , truncate=_lowerCamelCase , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 3_0 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 6_0 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowerCamelCase : List[Any] = """$$$"""
__lowerCamelCase : Optional[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=_lowerCamelCase , add_bos_token=_lowerCamelCase )
__lowerCamelCase : Any = """This is a simple input"""
__lowerCamelCase : Any = ["""This is a simple input 1""", """This is a simple input 2"""]
__lowerCamelCase : str = tokenizer.bos_token_id
__lowerCamelCase : Union[str, Any] = tokenizer(_lowerCamelCase )
__lowerCamelCase : Tuple = tokenizer(_lowerCamelCase )
self.assertEqual(out_s.input_ids[0] , _lowerCamelCase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__lowerCamelCase : Any = tokenizer.decode(out_s.input_ids )
__lowerCamelCase : Tuple = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , _lowerCamelCase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowerCamelCase : str = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
__lowerCamelCase : int = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
__lowerCamelCase : str = """\nif len_a > len_b: result = a\nelse: result = b"""
__lowerCamelCase : Any = tokenizer.encode(_lowerCamelCase )
__lowerCamelCase : Dict = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
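        # Decoding with truncate_before_pattern should cut the completion at the first comment,
        # docstring or run of blank lines matched by the patterns above.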
__lowerCamelCase : List[str] = tokenizer.decode(_lowerCamelCase , truncate_before_pattern=_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _snake_case ( self : int ):
'''simple docstring'''
pass
| 519
| 0
|
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : Tuple = ['''vqvae''']
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ):
super().__init__()
self.register_modules(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , mel=lowerCAmelCase__ , vqvae=lowerCAmelCase__)
def snake_case_ ( self):
return 5_0 if isinstance(self.scheduler , lowerCAmelCase__) else 1_0_0_0
@torch.no_grad()
def __call__( self , lowerCAmelCase__ = 1 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = 0 , lowerCAmelCase__ = 0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = 0 , lowerCAmelCase__ = 0 , lowerCAmelCase__ = None , lowerCAmelCase__ = 0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__=True , ):
__SCREAMING_SNAKE_CASE = steps or self.get_default_steps()
self.scheduler.set_timesteps(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size) == int:
__SCREAMING_SNAKE_CASE = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
__SCREAMING_SNAKE_CASE = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=lowerCAmelCase__ , device=self.device , )
__SCREAMING_SNAKE_CASE = noise
__SCREAMING_SNAKE_CASE = None
if audio_file is not None or raw_audio is not None:
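            # Turn the reference audio into a mel-spectrogram image, normalise it to [-1, 1] and
            # (if a VQ-VAE is configured) encode it into the latent space.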
self.mel.load_audio(lowerCAmelCase__ , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.mel.audio_slice_to_image(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = np.frombuffer(input_image.tobytes() , dtype="""uint8""").reshape(
(input_image.height, input_image.width))
__SCREAMING_SNAKE_CASE = (input_image / 2_5_5) * 2 - 1
__SCREAMING_SNAKE_CASE = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float).to(self.device)
if self.vqvae is not None:
__SCREAMING_SNAKE_CASE = self.vqvae.encode(torch.unsqueeze(lowerCAmelCase__ , 0)).latent_dist.sample(
generator=lowerCAmelCase__)[0]
__SCREAMING_SNAKE_CASE = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
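            # When resuming from an intermediate diffusion step, noise the input image up to that step first.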
__SCREAMING_SNAKE_CASE = self.scheduler.add_noise(lowerCAmelCase__ , lowerCAmelCase__ , self.scheduler.timesteps[start_step - 1])
__SCREAMING_SNAKE_CASE = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
__SCREAMING_SNAKE_CASE = int(mask_start_secs * pixels_per_second)
__SCREAMING_SNAKE_CASE = int(mask_end_secs * pixels_per_second)
__SCREAMING_SNAKE_CASE = self.scheduler.add_noise(lowerCAmelCase__ , lowerCAmelCase__ , torch.tensor(self.scheduler.timesteps[start_step:]))
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
if isinstance(self.unet , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = self.unet(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)["""sample"""]
else:
__SCREAMING_SNAKE_CASE = self.unet(lowerCAmelCase__ , lowerCAmelCase__)["""sample"""]
if isinstance(self.scheduler , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = self.scheduler.step(
model_output=lowerCAmelCase__ , timestep=lowerCAmelCase__ , sample=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , )["""prev_sample"""]
else:
__SCREAMING_SNAKE_CASE = self.scheduler.step(
model_output=lowerCAmelCase__ , timestep=lowerCAmelCase__ , sample=lowerCAmelCase__ , generator=lowerCAmelCase__ , )["""prev_sample"""]
if mask is not None:
if mask_start > 0:
__SCREAMING_SNAKE_CASE = mask[:, step, :, :mask_start]
if mask_end > 0:
__SCREAMING_SNAKE_CASE = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
__SCREAMING_SNAKE_CASE = 1 / self.vqvae.config.scaling_factor * images
__SCREAMING_SNAKE_CASE = self.vqvae.decode(lowerCAmelCase__)["""sample"""]
__SCREAMING_SNAKE_CASE = (images / 2 + 0.5).clamp(0 , 1)
__SCREAMING_SNAKE_CASE = images.cpu().permute(0 , 2 , 3 , 1).numpy()
__SCREAMING_SNAKE_CASE = (images * 2_5_5).round().astype("""uint8""")
__SCREAMING_SNAKE_CASE = list(
(Image.fromarray(_[:, :, 0]) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(lowerCAmelCase__ , mode="""RGB""").convert("""L""") for _ in images))
__SCREAMING_SNAKE_CASE = [self.mel.image_to_audio(lowerCAmelCase__) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(lowerCAmelCase__)[:, np.newaxis, :]) , **ImagePipelineOutput(lowerCAmelCase__))
@torch.no_grad()
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = 5_0):
assert isinstance(self.scheduler , lowerCAmelCase__)
self.scheduler.set_timesteps(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = np.array(
[np.frombuffer(image.tobytes() , dtype="""uint8""").reshape((1, image.height, image.width)) for image in images])
__SCREAMING_SNAKE_CASE = (sample / 2_5_5) * 2 - 1
__SCREAMING_SNAKE_CASE = torch.Tensor(lowerCAmelCase__).to(self.device)
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,))):
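            # Walk the timesteps in reverse (DDIM inversion) to map the image back to its starting noise.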
__SCREAMING_SNAKE_CASE = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
__SCREAMING_SNAKE_CASE = self.scheduler.alphas_cumprod[t]
__SCREAMING_SNAKE_CASE = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
__SCREAMING_SNAKE_CASE = 1 - alpha_prod_t
__SCREAMING_SNAKE_CASE = self.unet(lowerCAmelCase__ , lowerCAmelCase__)["""sample"""]
__SCREAMING_SNAKE_CASE = (1 - alpha_prod_t_prev) ** 0.5 * model_output
__SCREAMING_SNAKE_CASE = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
__SCREAMING_SNAKE_CASE = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def snake_case_ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
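        # Spherical linear interpolation (slerp) between two flattened tensors, weighted by alpha.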
__SCREAMING_SNAKE_CASE = acos(torch.dot(torch.flatten(lowerCAmelCase__) , torch.flatten(lowerCAmelCase__)) / torch.norm(lowerCAmelCase__) / torch.norm(lowerCAmelCase__))
return sin((1 - alpha) * theta) * xa / sin(lowerCAmelCase__) + sin(alpha * theta) * xa / sin(lowerCAmelCase__)
| 702
|
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def snake_case_ ( self , lowerCAmelCase__):
raise NotImplementedError()
def snake_case_ ( self):
raise NotImplementedError()
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = False , **lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = tokenizer
__SCREAMING_SNAKE_CASE = skip_prompt
__SCREAMING_SNAKE_CASE = decode_kwargs
# variables used in the streaming process
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = True
def snake_case_ ( self , lowerCAmelCase__):
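        # Receives the newly generated token ids, decodes the running cache and emits any newly printable text.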
if len(value.shape) > 1 and value.shape[0] > 1:
raise ValueError("""TextStreamer only supports batch size 1""")
elif len(value.shape) > 1:
__SCREAMING_SNAKE_CASE = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
__SCREAMING_SNAKE_CASE = False
return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist())
__SCREAMING_SNAKE_CASE = self.tokenizer.decode(self.token_cache , **self.decode_kwargs)
# After the symbol for a new line, we flush the cache.
if text.endswith("""\n"""):
__SCREAMING_SNAKE_CASE = text[self.print_len :]
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = 0
# If the last token is a CJK character, we print the characters.
elif len(lowerCAmelCase__) > 0 and self._is_chinese_char(ord(text[-1])):
__SCREAMING_SNAKE_CASE = text[self.print_len :]
self.print_len += len(lowerCAmelCase__)
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
__SCREAMING_SNAKE_CASE = text[self.print_len : text.rfind(""" """) + 1]
self.print_len += len(lowerCAmelCase__)
self.on_finalized_text(lowerCAmelCase__)
def snake_case_ ( self):
# Flush the cache, if it exists
if len(self.token_cache) > 0:
__SCREAMING_SNAKE_CASE = self.tokenizer.decode(self.token_cache , **self.decode_kwargs)
__SCREAMING_SNAKE_CASE = text[self.print_len :]
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = 0
else:
__SCREAMING_SNAKE_CASE = """"""
__SCREAMING_SNAKE_CASE = True
self.on_finalized_text(lowerCAmelCase__ , stream_end=lowerCAmelCase__)
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = False):
print(lowerCAmelCase__ , flush=lowerCAmelCase__ , end="""""" if not stream_end else None)
def snake_case_ ( self , lowerCAmelCase__):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4_e_0_0 and cp <= 0X9_f_f_f)
or (cp >= 0X3_4_0_0 and cp <= 0X4_d_b_f) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_a_6_d_f) #
or (cp >= 0X2_a_7_0_0 and cp <= 0X2_b_7_3_f) #
or (cp >= 0X2_b_7_4_0 and cp <= 0X2_b_8_1_f) #
or (cp >= 0X2_b_8_2_0 and cp <= 0X2_c_e_a_f) #
or (cp >= 0Xf_9_0_0 and cp <= 0Xf_a_f_f)
or (cp >= 0X2_f_8_0_0 and cp <= 0X2_f_a_1_f) #
): #
return True
return False
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = False , lowerCAmelCase__ = None , **lowerCAmelCase__):
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = Queue()
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = timeout
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = False):
self.text_queue.put(lowerCAmelCase__ , timeout=self.timeout)
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout)
def __iter__( self):
return self
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.text_queue.get(timeout=self.timeout)
if value == self.stop_signal:
raise StopIteration()
else:
return value
| 248
| 0
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__snake_case = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) )
UpperCamelCase = self.transformer_dir
shutil.copy(
os.path.join(__magic_name__ , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = """src/transformers"""
shutil.rmtree(self.transformer_dir )
def lowerCamelCase_ ( self : int , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any]=None ):
"""simple docstring"""
UpperCamelCase = comment + F'\nclass {class_name}(nn.Module):\n' + class_code
if overwrite_result is not None:
UpperCamelCase = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result
UpperCamelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 )
UpperCamelCase = black.format_str(__magic_name__ , mode=__magic_name__ )
UpperCamelCase = os.path.join(self.transformer_dir , """new_code.py""" )
with open(__magic_name__ , """w""" , newline="""\n""" ) as f:
f.write(__magic_name__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__magic_name__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=__magic_name__ )
with open(__magic_name__ , """r""" ) as f:
self.assertTrue(f.read() , __magic_name__ )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
self.assertEqual(__magic_name__ , __magic_name__ )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , __magic_name__ , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , __magic_name__ ) , )
# Copy consistency with a really long name
UpperCamelCase = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}' , F'{long_class_name}LMPredictionHead' , re.sub("""Bert""" , __magic_name__ , __magic_name__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , __magic_name__ , overwrite_result=re.sub("""Bert""" , """TestModel""" , __magic_name__ ) , )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
UpperCamelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
UpperCamelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
UpperCamelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
UpperCamelCase , UpperCamelCase = check_copies.convert_to_localized_md(
__magic_name__ , __magic_name__ , localized_readme["""format_model_list"""] )
self.assertFalse(__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
UpperCamelCase , UpperCamelCase = check_copies.convert_to_localized_md(
__magic_name__ , __magic_name__ , localized_readme["""format_model_list"""] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(__magic_name__ )
UpperCamelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
UpperCamelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
UpperCamelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
UpperCamelCase , UpperCamelCase = check_copies.convert_to_localized_md(
__magic_name__ , __magic_name__ , localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(__magic_name__ , __magic_name__ )
| 386
|
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
__snake_case = logging.get_logger(__name__)
__snake_case = {
"tensor(bool)": np.bool_,
"tensor(int8)": np.inta,
"tensor(uint8)": np.uinta,
"tensor(int16)": np.intaa,
"tensor(uint16)": np.uintaa,
"tensor(int32)": np.intaa,
"tensor(uint32)": np.uintaa,
"tensor(int64)": np.intaa,
"tensor(uint64)": np.uintaa,
"tensor(float16)": np.floataa,
"tensor(float)": np.floataa,
"tensor(double)": np.floataa,
}
class UpperCAmelCase :
def __init__( self : Dict , __magic_name__ : List[Any]=None , **__magic_name__ : List[str] ):
"""simple docstring"""
logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" )
UpperCamelCase = model
UpperCamelCase = kwargs.get("""model_save_dir""" , __magic_name__ )
UpperCamelCase = kwargs.get("""latest_model_name""" , __magic_name__ )
def __call__( self : Optional[Any] , **__magic_name__ : Dict ):
"""simple docstring"""
        # ONNX Runtime consumes numpy inputs; run the session on all outputs (None) with the given feeds
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)
@staticmethod
def lowerCamelCase_ ( __magic_name__ : Union[str, Path] , __magic_name__ : Optional[int]=None , __magic_name__ : List[str]=None ):
"""simple docstring"""
if provider is None:
logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" )
UpperCamelCase = """CPUExecutionProvider"""
return ort.InferenceSession(__magic_name__ , providers=[provider] , sess_options=__magic_name__ )
def lowerCamelCase_ ( self : Any , __magic_name__ : Union[str, Path] , __magic_name__ : Optional[str] = None , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
UpperCamelCase = self.model_save_dir.joinpath(self.latest_model_name )
UpperCamelCase = Path(__magic_name__ ).joinpath(__magic_name__ )
try:
shutil.copyfile(__magic_name__ , __magic_name__ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
UpperCamelCase = self.model_save_dir.joinpath(__magic_name__ )
if src_path.exists():
UpperCamelCase = Path(__magic_name__ ).joinpath(__magic_name__ )
try:
shutil.copyfile(__magic_name__ , __magic_name__ )
except shutil.SameFileError:
pass
def lowerCamelCase_ ( self : Any , __magic_name__ : Union[str, os.PathLike] , **__magic_name__ : int , ):
"""simple docstring"""
if os.path.isfile(__magic_name__ ):
logger.error(F'Provided path ({save_directory}) should be a directory, not a file' )
return
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
# saving model weights/files
self._save_pretrained(__magic_name__ , **__magic_name__ )
@classmethod
def lowerCamelCase_ ( cls : List[str] , __magic_name__ : Union[str, Path] , __magic_name__ : Optional[Union[bool, str, None]] = None , __magic_name__ : Optional[Union[str, None]] = None , __magic_name__ : bool = False , __magic_name__ : Optional[str] = None , __magic_name__ : Optional[str] = None , __magic_name__ : Optional[str] = None , __magic_name__ : Optional["ort.SessionOptions"] = None , **__magic_name__ : List[Any] , ):
"""simple docstring"""
UpperCamelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(__magic_name__ ):
UpperCamelCase = OnnxRuntimeModel.load_model(
os.path.join(__magic_name__ , __magic_name__ ) , provider=__magic_name__ , sess_options=__magic_name__ )
UpperCamelCase = Path(__magic_name__ )
# load model from hub
else:
# download model
UpperCamelCase = hf_hub_download(
repo_id=__magic_name__ , filename=__magic_name__ , use_auth_token=__magic_name__ , revision=__magic_name__ , cache_dir=__magic_name__ , force_download=__magic_name__ , )
UpperCamelCase = Path(__magic_name__ ).parent
UpperCamelCase = Path(__magic_name__ ).name
UpperCamelCase = OnnxRuntimeModel.load_model(__magic_name__ , provider=__magic_name__ , sess_options=__magic_name__ )
return cls(model=__magic_name__ , **__magic_name__ )
@classmethod
def lowerCamelCase_ ( cls : Any , __magic_name__ : Union[str, Path] , __magic_name__ : bool = True , __magic_name__ : Optional[str] = None , __magic_name__ : Optional[str] = None , **__magic_name__ : str , ):
"""simple docstring"""
UpperCamelCase = None
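        # Support the `model_id@revision` syntax for pinning a specific model revision.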
if len(str(__magic_name__ ).split("""@""" ) ) == 2:
UpperCamelCase , UpperCamelCase = model_id.split("""@""" )
return cls._from_pretrained(
model_id=__magic_name__ , revision=__magic_name__ , cache_dir=__magic_name__ , force_download=__magic_name__ , use_auth_token=__magic_name__ , **__magic_name__ , )
| 386
| 1
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Optional[Any] ):
__UpperCamelCase = tempfile.mkdtemp()
# fmt: off
__UpperCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
__UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
__UpperCamelCase = {
'''do_resize''': True,
'''size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
__UpperCamelCase = os.path.join(self.tmpdirname , snake_case )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(snake_case , snake_case )
def snake_case ( self : List[Any] , **snake_case : str ):
return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case )
def snake_case ( self : int , **snake_case : Any ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **snake_case )
def snake_case ( self : Optional[Any] ):
shutil.rmtree(self.tmpdirname )
def snake_case ( self : Any ):
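        # Prepare a list of random PIL images to feed the image processor in the tests below.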
__UpperCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__UpperCamelCase = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case ( self : Optional[int] ):
__UpperCamelCase = self.get_tokenizer()
__UpperCamelCase = self.get_image_processor()
__UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=snake_case , image_processor=snake_case )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case )
def snake_case ( self : Union[str, Any] ):
__UpperCamelCase = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__UpperCamelCase = self.get_image_processor(do_normalize=snake_case , padding_value=1.0 )
__UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=snake_case , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case )
def snake_case ( self : int ):
__UpperCamelCase = self.get_image_processor()
__UpperCamelCase = self.get_tokenizer()
__UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=snake_case , image_processor=snake_case )
__UpperCamelCase = self.prepare_image_inputs()
__UpperCamelCase = image_processor(snake_case , return_tensors='''np''' )
__UpperCamelCase = processor(images=snake_case , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def snake_case ( self : List[Any] ):
__UpperCamelCase = self.get_image_processor()
__UpperCamelCase = self.get_tokenizer()
__UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=snake_case , image_processor=snake_case )
__UpperCamelCase = '''lower newer'''
__UpperCamelCase = processor(text=snake_case )
__UpperCamelCase = tokenizer(snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case ( self : List[str] ):
__UpperCamelCase = self.get_image_processor()
__UpperCamelCase = self.get_tokenizer()
__UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=snake_case , image_processor=snake_case )
__UpperCamelCase = '''lower newer'''
__UpperCamelCase = self.prepare_image_inputs()
__UpperCamelCase = processor(text=snake_case , images=snake_case )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(snake_case ):
processor()
def snake_case ( self : Optional[int] ):
__UpperCamelCase = self.get_image_processor()
__UpperCamelCase = self.get_tokenizer()
__UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=snake_case , image_processor=snake_case )
__UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCamelCase = processor.batch_decode(snake_case )
__UpperCamelCase = tokenizer.batch_decode(snake_case )
self.assertListEqual(snake_case , snake_case )
def snake_case ( self : Optional[int] ):
__UpperCamelCase = self.get_image_processor()
__UpperCamelCase = self.get_tokenizer()
__UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=snake_case , image_processor=snake_case )
__UpperCamelCase = '''lower newer'''
__UpperCamelCase = self.prepare_image_inputs()
__UpperCamelCase = processor(text=snake_case , images=snake_case )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
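# A compact, self-contained sketch of the behaviour the tests above exercise: the
# processor simply routes text to the tokenizer and images to the image processor.
# The two checkpoint names below are illustrative assumptions, not taken from this file.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image
    from transformers import BertTokenizer, ViTImageProcessor, VisionTextDualEncoderProcessor

    processor = VisionTextDualEncoderProcessor(
        tokenizer=BertTokenizer.from_pretrained("bert-base-uncased"),
        image_processor=ViTImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k"),
    )
    image = Image.fromarray(np.random.randint(0, 255, (30, 400, 3), dtype=np.uint8))
    batch = processor(text=["lower newer"], images=image, return_tensors="np", padding=True)
    print(list(batch.keys()))  # ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values']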
| 375
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
a_ = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
a_ = get_tests_dir("fixtures/vocab.json")
a_ = get_tests_dir("fixtures")
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def snake_case ( self : Optional[Any] ):
__UpperCamelCase = 0
def snake_case ( self : Tuple ):
__UpperCamelCase = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(snake_case , snake_case )
def snake_case ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase = WavaVecaConfig()
__UpperCamelCase = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(snake_case )
processor.save_pretrained(snake_case )
__UpperCamelCase = AutoProcessor.from_pretrained(snake_case )
self.assertIsInstance(snake_case , snake_case )
def snake_case ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(snake_case , os.path.join(snake_case , snake_case ) )
copyfile(snake_case , os.path.join(snake_case , '''vocab.json''' ) )
__UpperCamelCase = AutoProcessor.from_pretrained(snake_case )
self.assertIsInstance(snake_case , snake_case )
def snake_case ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase = WavaVecaFeatureExtractor()
__UpperCamelCase = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
__UpperCamelCase = WavaVecaProcessor(snake_case , snake_case )
# save in new folder
processor.save_pretrained(snake_case )
# drop `processor_class` in tokenizer
with open(os.path.join(snake_case , snake_case ) , '''r''' ) as f:
__UpperCamelCase = json.load(snake_case )
config_dict.pop('''processor_class''' )
with open(os.path.join(snake_case , snake_case ) , '''w''' ) as f:
f.write(json.dumps(snake_case ) )
__UpperCamelCase = AutoProcessor.from_pretrained(snake_case )
self.assertIsInstance(snake_case , snake_case )
def snake_case ( self : Tuple ):
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase = WavaVecaFeatureExtractor()
__UpperCamelCase = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
__UpperCamelCase = WavaVecaProcessor(snake_case , snake_case )
# save in new folder
processor.save_pretrained(snake_case )
# drop `processor_class` in feature extractor
with open(os.path.join(snake_case , snake_case ) , '''r''' ) as f:
__UpperCamelCase = json.load(snake_case )
config_dict.pop('''processor_class''' )
with open(os.path.join(snake_case , snake_case ) , '''w''' ) as f:
f.write(json.dumps(snake_case ) )
__UpperCamelCase = AutoProcessor.from_pretrained(snake_case )
self.assertIsInstance(snake_case , snake_case )
def snake_case ( self : Dict ):
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(snake_case )
# copy relevant files
copyfile(snake_case , os.path.join(snake_case , '''vocab.json''' ) )
# create emtpy sample processor
with open(os.path.join(snake_case , snake_case ) , '''w''' ) as f:
f.write('''{}''' )
__UpperCamelCase = AutoProcessor.from_pretrained(snake_case )
self.assertIsInstance(snake_case , snake_case )
def snake_case ( self : int ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(snake_case ):
__UpperCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case ):
__UpperCamelCase = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=snake_case )
__UpperCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=snake_case )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
__UpperCamelCase = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
__UpperCamelCase = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
__UpperCamelCase = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=snake_case , use_fast=snake_case )
__UpperCamelCase = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def snake_case ( self : List[Any] ):
try:
AutoConfig.register('''custom''' , snake_case )
AutoFeatureExtractor.register(snake_case , snake_case )
AutoTokenizer.register(snake_case , slow_tokenizer_class=snake_case )
AutoProcessor.register(snake_case , snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case ):
AutoProcessor.register(snake_case , snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
__UpperCamelCase = CustomFeatureExtractor.from_pretrained(snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase = os.path.join(snake_case , '''vocab.txt''' )
with open(snake_case , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
__UpperCamelCase = CustomTokenizer(snake_case )
__UpperCamelCase = CustomProcessor(snake_case , snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(snake_case )
__UpperCamelCase = AutoProcessor.from_pretrained(snake_case )
self.assertIsInstance(snake_case , snake_case )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def snake_case ( self : Optional[int] ):
class _lowerCamelCase ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = False
class _lowerCamelCase ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = False
class _lowerCamelCase ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCAmelCase__ : Any = "AutoFeatureExtractor"
lowerCAmelCase__ : str = "AutoTokenizer"
lowerCAmelCase__ : str = False
try:
AutoConfig.register('''custom''' , snake_case )
AutoFeatureExtractor.register(snake_case , snake_case )
AutoTokenizer.register(snake_case , slow_tokenizer_class=snake_case )
AutoProcessor.register(snake_case , snake_case )
# If remote code is not set, the default is to use local classes.
__UpperCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
__UpperCamelCase = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=snake_case )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
__UpperCamelCase = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=snake_case )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def snake_case ( self : Optional[int] ):
__UpperCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def snake_case ( self : str ):
__UpperCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def snake_case ( cls : List[str] ):
__UpperCamelCase = TOKEN
HfFolder.save_token(snake_case )
@classmethod
def snake_case ( cls : Union[str, Any] ):
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def snake_case ( self : int ):
__UpperCamelCase = WavaVecaProcessor.from_pretrained(snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(snake_case , '''test-processor''' ) , push_to_hub=snake_case , use_auth_token=self._token )
__UpperCamelCase = WavaVecaProcessor.from_pretrained(F"{USER}/test-processor" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(snake_case , getattr(new_processor.feature_extractor , snake_case ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def snake_case ( self : Any ):
__UpperCamelCase = WavaVecaProcessor.from_pretrained(snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(snake_case , '''test-processor-org''' ) , push_to_hub=snake_case , use_auth_token=self._token , organization='''valid_org''' , )
__UpperCamelCase = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(snake_case , getattr(new_processor.feature_extractor , snake_case ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def snake_case ( self : List[str] ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
__UpperCamelCase = CustomFeatureExtractor.from_pretrained(snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase = os.path.join(snake_case , '''vocab.txt''' )
with open(snake_case , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
__UpperCamelCase = CustomTokenizer(snake_case )
__UpperCamelCase = CustomProcessor(snake_case , snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"{USER}/test-dynamic-processor" , token=self._token )
__UpperCamelCase = Repository(snake_case , clone_from=F"{USER}/test-dynamic-processor" , token=self._token )
processor.save_pretrained(snake_case )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(snake_case , '''tokenizer_config.json''' ) ) as f:
__UpperCamelCase = json.load(snake_case )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(snake_case , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(snake_case , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(snake_case , '''custom_processing.py''' ) ) )
repo.push_to_hub()
__UpperCamelCase = AutoProcessor.from_pretrained(F"{USER}/test-dynamic-processor" , trust_remote_code=snake_case )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
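# The registration pattern the tests above rely on, recapped in one place (kept as
# comments so it does not interfere with the tests' own register/clean-up logic;
# the toy Custom* classes are the ones imported from test_module at the top of this file):
#
#   AutoConfig.register("custom", CustomConfig)
#   AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
#   AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
#   AutoProcessor.register(CustomConfig, CustomProcessor)
#
# Once registered, AutoProcessor.from_pretrained(<folder saved by CustomProcessor>)
# resolves to CustomProcessor exactly like a built-in processor class.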
| 375
| 1
|
"""simple docstring"""
def decimal_to_fraction(decimal):
    '''simple docstring'''
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("""Please enter a valid number""")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(""".""")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # reduce the fraction with the Euclidean algorithm
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
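# Cross-check sketch (not part of the original): the same reduction done with
# math.gcd, handy for sanity-checking the hand-rolled Euclidean loop above,
# e.g. decimal_to_fraction_via_gcd("6.25") == (25, 4) == decimal_to_fraction("6.25").
from math import gcd


def decimal_to_fraction_via_gcd(decimal):
    decimal = float(decimal)
    frac_digits = 0 if decimal == int(decimal) else len(str(decimal).split(""".""")[1])
    numerator, denominator = int(decimal * (10**frac_digits)), 10**frac_digits
    common = gcd(numerator, denominator)
    return numerator // common, denominator // common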
if __name__ == "__main__":
print(F"{decimal_to_fraction(2) = }")
print(F"{decimal_to_fraction(89.0) = }")
print(F"{decimal_to_fraction('67') = }")
print(F"{decimal_to_fraction('45.0') = }")
print(F"{decimal_to_fraction(1.5) = }")
print(F"{decimal_to_fraction('6.25') = }")
print(F"{decimal_to_fraction('78td') = }")
| 65
|
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                F'The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''
                F' version {__version__} is >= {version_name}' )
        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = F'The `{attribute}` argument is deprecated and will be removed in version {version_name}.'
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = F'The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'
        elif deprecated_kwargs is None:
            warning = F'`{attribute}` is deprecated and will be removed in version {version_name}.'
        if warning is not None:
            warning = warning + ' ' if standard_warn else ''
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(F'{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`')
    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
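# Hedged usage sketch of the helper above; the argument name and version string are
# illustrative placeholders, shown only to document the calling pattern:
#
#   def step(self, new_scale=None, **kwargs):
#       scale = deprecate("scale", "0.30.0", "Pass `new_scale` instead.", take_from=kwargs)
#       new_scale = new_scale if new_scale is not None else scale
#
# With take_from=kwargs the deprecated value is popped and returned; any leftover
# unexpected kwargs then trigger the TypeError constructed at the end of deprecate().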
| 247
| 0
|
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class UpperCamelCase ( __a , unittest.TestCase ):
a__ :Dict = XLMProphetNetTokenizer
a__ :List[Any] = False
a__ :str = True
def A_ (self ) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCamelCase_ : str = XLMProphetNetTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def A_ (self ) -> str:
UpperCamelCase_ : int = """[PAD]"""
UpperCamelCase_ : Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase )
def A_ (self ) -> int:
UpperCamelCase_ : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """[PAD]""" )
self.assertEqual(vocab_keys[1] , """[CLS]""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(__UpperCamelCase ) , 1_012 )
def A_ (self ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 1_012 )
def A_ (self ) -> List[str]:
UpperCamelCase_ : Dict = XLMProphetNetTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase )
UpperCamelCase_ : Optional[int] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__UpperCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCamelCase_ : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__UpperCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
UpperCamelCase_ : List[str] = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
UpperCamelCase_ : int = tokenizer.convert_ids_to_tokens(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""[UNK]""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""[UNK]""",
""".""",
] , )
@cached_property
def A_ (self ) -> Optional[int]:
return XLMProphetNetTokenizer.from_pretrained("""microsoft/xprophetnet-large-wiki100-cased""" )
@slow
def A_ (self ) -> Optional[Any]:
UpperCamelCase_ : List[Any] = """Hello World!"""
UpperCamelCase_ : Any = [35_389, 6_672, 49, 2]
self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) )
@slow
def A_ (self ) -> Optional[int]:
# fmt: off
UpperCamelCase_ : Optional[int] = {"""input_ids""": [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase , model_name="""microsoft/xprophetnet-large-wiki100-cased""" , revision="""1acad1643ddd54a44df6a1b797ada8373685d90e""" , )
| 138
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 138
| 1
|
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class _A ( unittest.TestCase ):
def lowercase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
debug_launcher(test_script.main )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
debug_launcher(test_ops.main )
| 26
|
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def test_send_file_running_as_expected(file, sock):
    """simple docstring"""
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)
    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
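# For context, a plausible reconstruction of the send_file() these mocks exercise
# (an assumption consistent with the assertions above, not the verified original):
# bind/listen/accept once, read the client's greeting, stream the file in chunks, shut down.
import socket as _socket


def _send_file_sketch(filename: str = "mytext.txt", testing: bool = False) -> None:
    port = 12312  # illustrative port number
    sock = _socket.socket()
    sock.bind((_socket.gethostname(), port))
    sock.listen(5)
    while True:
        conn, _addr = sock.accept()
        conn.recv(1024)  # the client announces itself before the transfer starts
        with open(filename, "rb") as in_file:
            data = in_file.read(1024)
            while data:
                conn.send(data)
                data = in_file.read(1024)
        conn.close()
        if testing:  # stop after one connection so a unit test can terminate
            break
    sock.shutdown(1)
    sock.close()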
| 424
| 0
|
'''simple docstring'''
class Graph:
    """simple docstring"""

    def __init__(self):
        # adjacency list: vertex -> list of adjacent vertices
        self.vertex = {}

    def print_graph(self):
        print(self.vertex)
        for i in self.vertex:
            print(i, ' -> ', ' -> '.join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex, to_vertex):
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited):
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=' ')
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
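# An equivalent iterative traversal, shown for comparison (illustrative addition,
# not part of the original; assumes vertices are labelled 0..n-1 as in the demo above).
def dfs_iterative(graph: Graph) -> None:
    visited = [False] * len(graph.vertex)
    for start in range(len(graph.vertex)):
        stack = [start]
        while stack:
            node = stack.pop()
            if visited[node]:
                continue
            visited[node] = True
            print(node, end=' ')
            # push neighbours in reverse so they are popped in insertion order
            stack.extend(reversed(graph.vertex.get(node, [])))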
| 709
|
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def SCREAMING_SNAKE_CASE_ ( snake_case_ : Optional[int] , snake_case_ : Optional[int] , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : Dict ) -> List[str]:
# load base model
SCREAMING_SNAKE_CASE : int = StableDiffusionPipeline.from_pretrained(snake_case_ , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
SCREAMING_SNAKE_CASE : List[str] = load_file(snake_case_ )
SCREAMING_SNAKE_CASE : List[str] = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
SCREAMING_SNAKE_CASE : int = key.split('.' )[0].split(LORA_PREFIX_TEXT_ENCODER + '_' )[-1].split('_' )
SCREAMING_SNAKE_CASE : int = pipeline.text_encoder
else:
SCREAMING_SNAKE_CASE : Optional[int] = key.split('.' )[0].split(LORA_PREFIX_UNET + '_' )[-1].split('_' )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline.unet
# find the target layer
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_infos.pop(0 )
while len(snake_case_ ) > -1:
try:
SCREAMING_SNAKE_CASE : int = curr_layer.__getattr__(snake_case_ )
if len(snake_case_ ) > 0:
SCREAMING_SNAKE_CASE : int = layer_infos.pop(0 )
elif len(snake_case_ ) == 0:
break
except Exception:
if len(snake_case_ ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
SCREAMING_SNAKE_CASE : List[str] = layer_infos.pop(0 )
SCREAMING_SNAKE_CASE : str = []
if "lora_down" in key:
pair_keys.append(key.replace('lora_down' , 'lora_up' ) )
pair_keys.append(snake_case_ )
else:
pair_keys.append(snake_case_ )
pair_keys.append(key.replace('lora_up' , 'lora_down' ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
SCREAMING_SNAKE_CASE : Any = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
SCREAMING_SNAKE_CASE : List[str] = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(snake_case_ , snake_case_ ).unsqueeze(2 ).unsqueeze(3 )
else:
SCREAMING_SNAKE_CASE : Tuple = state_dict[pair_keys[0]].to(torch.floataa )
SCREAMING_SNAKE_CASE : Any = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(snake_case_ , snake_case_ )
# update visited list
for item in pair_keys:
visited.append(snake_case_ )
return pipeline
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.'
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors'
)
parser.add_argument(
'--lora_prefix_text_encoder',
default='lora_te',
type=str,
help='The prefix of text encoder weight in safetensors',
)
parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW')
parser.add_argument(
'--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.'
)
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = args.base_model_path
__UpperCAmelCase = args.checkpoint_path
__UpperCAmelCase = args.dump_path
__UpperCAmelCase = args.lora_prefix_unet
__UpperCAmelCase = args.lora_prefix_text_encoder
__UpperCAmelCase = args.alpha
__UpperCAmelCase = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
__UpperCAmelCase = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 220
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE : Tuple = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE : List[str] = {
"yjernite/retribert-base-uncased": 512,
}
SCREAMING_SNAKE_CASE : Optional[Any] = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class UpperCamelCase ( lowercase__ ):
'''simple docstring'''
lowercase : int =VOCAB_FILES_NAMES
lowercase : int =PRETRAINED_VOCAB_FILES_MAP
lowercase : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[Any] =PRETRAINED_INIT_CONFIGURATION
lowercase : Tuple =RetriBertTokenizer
lowercase : Dict =["""input_ids""", """attention_mask"""]
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_="[UNK]" , UpperCamelCase_="[SEP]" , UpperCamelCase_="[PAD]" , UpperCamelCase_="[CLS]" , UpperCamelCase_="[MASK]" , UpperCamelCase_=True , UpperCamelCase_=None , **UpperCamelCase_ , ):
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , )
lowercase_ :Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , UpperCamelCase_ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , UpperCamelCase_ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , UpperCamelCase_ ) != tokenize_chinese_chars
):
lowercase_ :Tuple = getattr(UpperCamelCase_ , normalizer_state.pop('''type''' ) )
lowercase_ :int = do_lower_case
lowercase_ :int = strip_accents
lowercase_ :List[Any] = tokenize_chinese_chars
lowercase_ :Tuple = normalizer_class(**UpperCamelCase_ )
lowercase_ :Any = do_lower_case
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_=None ):
lowercase_ :str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
lowercase_ :Optional[Any] = [self.sep_token_id]
lowercase_ :Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
lowercase_ :int = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
| 257
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SCREAMING_SNAKE_CASE : Union[str, Any] = get_tests_dir("fixtures")
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase ( self ):
# A mock response for an HTTP head request to emulate server down
lowercase_ :str = mock.Mock()
lowercase_ :Dict = 500
lowercase_ :List[Any] = {}
lowercase_ :str = HTTPError
lowercase_ :Optional[int] = {}
# Download this model to make sure it's in the cache.
lowercase_ :Any = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=UpperCamelCase_ ) as mock_head:
lowercase_ :Dict = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCamelCase ( self ):
# This test is for deprecated behavior and can be removed in v5
lowercase_ :Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' )
@is_staging_test
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCamelCase ( cls ):
lowercase_ :Optional[Any] = TOKEN
HfFolder.save_token(UpperCamelCase_ )
@classmethod
def UpperCamelCase ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-feature-extractor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' )
except HTTPError:
pass
def UpperCamelCase ( self ):
lowercase_ :int = WavaVecaFeatureExtractor.from_pretrained(UpperCamelCase_ )
feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token )
lowercase_ :List[Any] = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
UpperCamelCase_ , repo_id='''test-feature-extractor''' , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
lowercase_ :Optional[int] = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
def UpperCamelCase ( self ):
lowercase_ :Tuple = WavaVecaFeatureExtractor.from_pretrained(UpperCamelCase_ )
feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token )
lowercase_ :str = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
UpperCamelCase_ , repo_id='''valid_org/test-feature-extractor-org''' , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
lowercase_ :Optional[Any] = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
def UpperCamelCase ( self ):
CustomFeatureExtractor.register_for_auto_class()
lowercase_ :str = CustomFeatureExtractor.from_pretrained(UpperCamelCase_ )
feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , )
lowercase_ :str = AutoFeatureExtractor.from_pretrained(
f"{USER}/test-dynamic-feature-extractor" , trust_remote_code=UpperCamelCase_ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''' )
| 257
| 1
|
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # reduce the amount of TensorFlow console output
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 374
|
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ) ->Any:
"""simple docstring"""
__UpperCAmelCase : Tuple = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'''
__UpperCAmelCase : Tuple = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw ).convert('''RGB''' )
__UpperCAmelCase : Dict = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48_145_466, 0.4_578_275, 0.40_821_073) , (0.26_862_954, 0.26_130_258, 0.27_577_711) ),
] )
__UpperCAmelCase : Optional[Any] = transform(UpperCAmelCase_ ).unsqueeze(0 ).to(UpperCAmelCase_ )
return image
def lowerCamelCase_ ( UpperCAmelCase_ ) ->Optional[Any]:
"""simple docstring"""
if "visual_encoder" in key:
__UpperCAmelCase : Tuple = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , UpperCAmelCase_ )
if "blocks" in key:
__UpperCAmelCase : List[str] = re.sub(R'''blocks''' , '''layers''' , UpperCAmelCase_ )
if "attn" in key:
__UpperCAmelCase : Union[str, Any] = re.sub(R'''attn''' , '''self_attn''' , UpperCAmelCase_ )
if "norm1" in key:
__UpperCAmelCase : Optional[Any] = re.sub(R'''norm1''' , '''layer_norm1''' , UpperCAmelCase_ )
if "norm2" in key:
__UpperCAmelCase : Optional[int] = re.sub(R'''norm2''' , '''layer_norm2''' , UpperCAmelCase_ )
if "encoder.norm" in key:
__UpperCAmelCase : Union[str, Any] = re.sub(R'''encoder.norm''' , '''post_layernorm''' , UpperCAmelCase_ )
if "encoder.patch_embed.proj" in key:
__UpperCAmelCase : Optional[int] = re.sub(R'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , UpperCAmelCase_ )
if "encoder.pos_embed" in key:
__UpperCAmelCase : List[Any] = re.sub(R'''encoder.pos_embed''' , '''embeddings.position_embedding''' , UpperCAmelCase_ )
if "encoder.cls_token" in key:
__UpperCAmelCase : List[str] = re.sub(R'''encoder.cls_token''' , '''embeddings.class_embedding''' , UpperCAmelCase_ )
if "self_attn" in key:
__UpperCAmelCase : Any = re.sub(R'''self_attn.proj''' , '''self_attn.projection''' , UpperCAmelCase_ )
return key
@torch.no_grad()
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_=None ) ->Dict:
"""simple docstring"""
if config_path is not None:
__UpperCAmelCase : Optional[int] = BlipConfig.from_pretrained(UpperCAmelCase_ )
else:
__UpperCAmelCase : Optional[int] = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} )
__UpperCAmelCase : Optional[Any] = BlipForConditionalGeneration(UpperCAmelCase_ ).eval()
__UpperCAmelCase : List[str] = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
__UpperCAmelCase : List[Any] = blip_decoder(pretrained=UpperCAmelCase_ , image_size=3_84 , vit='''base''' )
__UpperCAmelCase : str = pt_model.eval()
__UpperCAmelCase : List[Any] = pt_model.state_dict()
for key in modified_state_dict.copy():
__UpperCAmelCase : List[str] = modified_state_dict.pop(UpperCAmelCase_ )
__UpperCAmelCase : Optional[int] = rename_key(UpperCAmelCase_ )
__UpperCAmelCase : List[Any] = value
hf_model.load_state_dict(UpperCAmelCase_ )
__UpperCAmelCase : str = 3_84
__UpperCAmelCase : Tuple = load_demo_image(image_size=UpperCAmelCase_ , device='''cpu''' )
__UpperCAmelCase : Optional[Any] = BertTokenizer.from_pretrained('''bert-base-uncased''' )
__UpperCAmelCase : Optional[int] = tokenizer(['''a picture of'''] ).input_ids
__UpperCAmelCase : Union[str, Any] = hf_model.generate(UpperCAmelCase_ , UpperCAmelCase_ )
assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
__UpperCAmelCase : Optional[int] = hf_model.generate(UpperCAmelCase_ )
assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(UpperCAmelCase_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
__UpperCAmelCase : Optional[Any] = (
'''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
)
__UpperCAmelCase : List[str] = blip_vqa(pretrained=UpperCAmelCase_ , image_size=UpperCAmelCase_ , vit='''base''' )
vqa_model.eval()
__UpperCAmelCase : Dict = vqa_model.state_dict()
for key in modified_state_dict.copy():
__UpperCAmelCase : List[Any] = modified_state_dict.pop(UpperCAmelCase_ )
__UpperCAmelCase : int = rename_key(UpperCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = value
__UpperCAmelCase : List[str] = BlipForQuestionAnswering(UpperCAmelCase_ )
hf_vqa_model.load_state_dict(UpperCAmelCase_ )
__UpperCAmelCase : str = ['''How many dogs are in this image?''']
__UpperCAmelCase : Dict = tokenizer(UpperCAmelCase_ , return_tensors='''pt''' ).input_ids
__UpperCAmelCase : Any = hf_vqa_model.generate(UpperCAmelCase_ , UpperCAmelCase_ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
__UpperCAmelCase : List[Any] = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
__UpperCAmelCase : List[str] = blip_itm(pretrained=UpperCAmelCase_ , image_size=UpperCAmelCase_ , vit='''base''' )
itm_model.eval()
__UpperCAmelCase : Any = itm_model.state_dict()
for key in modified_state_dict.copy():
__UpperCAmelCase : List[str] = modified_state_dict.pop(UpperCAmelCase_ )
__UpperCAmelCase : Dict = rename_key(UpperCAmelCase_ )
__UpperCAmelCase : int = value
__UpperCAmelCase : Optional[int] = BlipForImageTextRetrieval(UpperCAmelCase_ )
__UpperCAmelCase : List[str] = ['''A picture of a woman with a dog sitting in a beach''']
__UpperCAmelCase : Optional[int] = tokenizer(
UpperCAmelCase_ , return_tensors='''pt''' , padding='''max_length''' , truncation=UpperCAmelCase_ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(UpperCAmelCase_ )
hf_itm_model.eval()
__UpperCAmelCase : List[Any] = hf_itm_model(UpperCAmelCase_ , UpperCAmelCase_ , use_itm_head=UpperCAmelCase_ )
__UpperCAmelCase : int = hf_itm_model(UpperCAmelCase_ , UpperCAmelCase_ , use_itm_head=UpperCAmelCase_ )
assert out[0].item() == 0.2_110_687_494_277_954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45_698_845_386_505_127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
lowercase__ :str = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
lowercase__ :int = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 374
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
UpperCamelCase_ : int = logging.get_logger(__name__)
@dataclass
class __lowercase :
def __init__(self : Dict , snake_case : List[str]=False , snake_case : int=False , snake_case : Tuple=6.0 , snake_case : List[str]=None , snake_case : Tuple=False , snake_case : Union[str, Any]=False , snake_case : Dict=None , snake_case : Dict="fp4" , snake_case : str=False , **snake_case : Dict , ) -> List[Any]:
_lowercase : Tuple = load_in_abit
_lowercase : str = load_in_abit
_lowercase : Tuple = llm_inta_threshold
_lowercase : Any = llm_inta_skip_modules
_lowercase : Any = llm_inta_enable_fpaa_cpu_offload
_lowercase : Union[str, Any] = llm_inta_has_fpaa_weight
_lowercase : Any = bnb_abit_quant_type
_lowercase : int = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
_lowercase : Optional[int] = torch.floataa
elif isinstance(snake_case , snake_case ):
_lowercase : Union[str, Any] = getattr(snake_case , snake_case )
elif isinstance(snake_case , torch.dtype ):
_lowercase : int = bnb_abit_compute_dtype
else:
raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype" )
self.post_init()
def _a(self : Union[str, Any] ) -> Optional[int]:
if not isinstance(self.llm_inta_threshold , snake_case ):
raise ValueError("llm_int8_threshold must be a float" )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , snake_case ):
raise ValueError("llm_int8_skip_modules must be a list of strings" )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , snake_case ):
raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean" )
if not isinstance(self.llm_inta_has_fpaa_weight , snake_case ):
raise ValueError("llm_int8_has_fp16_weight must be a boolean" )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError("bnb_4bit_compute_dtype must be torch.dtype" )
if not isinstance(self.bnb_abit_quant_type , snake_case ):
raise ValueError("bnb_4bit_quant_type must be a string" )
if not isinstance(self.bnb_abit_use_double_quant , snake_case ):
raise ValueError("bnb_4bit_use_double_quant must be a boolean" )
if self.load_in_abit and not version.parse(importlib.metadata.version("bitsandbytes" ) ) >= version.parse(
"0.39.0" ):
raise ValueError(
"4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version" )
def _a(self : Tuple ) -> Union[str, Any]:
return self.load_in_abit or self.load_in_abit
def _a(self : Dict ) -> Union[str, Any]:
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def _a(cls : List[Any] , snake_case : str , snake_case : Any , **snake_case : Tuple ) -> Optional[Any]:
_lowercase : Dict = cls(**snake_case )
_lowercase : Optional[int] = []
for key, value in kwargs.items():
if hasattr(snake_case , snake_case ):
setattr(snake_case , snake_case , snake_case )
to_remove.append(snake_case )
for key in to_remove:
kwargs.pop(snake_case , snake_case )
if return_unused_kwargs:
return config, kwargs
else:
return config
def _a(self : int , snake_case : Union[str, os.PathLike] ) -> Union[str, Any]:
with open(snake_case , "w" , encoding="utf-8" ) as writer:
_lowercase : str = self.to_dict()
_lowercase : Optional[int] = json.dumps(snake_case , indent=2 , sort_keys=snake_case ) + "\n"
writer.write(snake_case )
def _a(self : Optional[Any] ) -> Dict[str, Any]:
_lowercase : List[Any] = copy.deepcopy(self.__dict__ )
_lowercase : str = str(output["bnb_4bit_compute_dtype"] ).split("." )[1]
return output
def __repr__(self : Union[str, Any] ) -> int:
    def __repr__(self : Union[str, Any] ) -> str:
def _a(self : List[Any] , snake_case : bool = True ) -> str:
if use_diff is True:
_lowercase : Optional[Any] = self.to_diff_dict()
else:
_lowercase : List[str] = self.to_dict()
return json.dumps(snake_case , indent=2 , sort_keys=snake_case ) + "\n"
def _a(self : Optional[Any] ) -> Dict[str, Any]:
_lowercase : Dict = self.to_dict()
# get the default config dict
_lowercase : Dict = BitsAndBytesConfig().to_dict()
_lowercase : Dict = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
_lowercase : Optional[Any] = value
return serializable_config_dict
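# Added usage sketch (not part of the original module): the class above mirrors the 4-bit/8-bit
# quantization config that is normally passed to `from_pretrained(..., quantization_config=...)`.
# The checkpoint name below is only an illustrative assumption.
if __name__ == "__main__":
    from transformers import AutoModelForCausalLM, BitsAndBytesConfig

    example_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_use_double_quant=True,
        bnb_4bit_compute_dtype=torch.float16,
    )
    print(example_config.to_json_string())  # serialized through the to_dict()/to_json_string() path above
    # model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=example_config)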
| 461
|
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class __lowercase :
_A = None
_A = False
_A = False
_A = False
_A = None
_A = None
_A = False
_A = False
_A = False
_A = True
_A = None
_A = 1
_A = None
_A = False
_A = None
_A = None
def _a(self : str ) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(snake_case ) for k, v in self.__dict__.items()} )
| 461
| 1
|
def nand_gate ( input_a : int , input_b : int ):
    '''simple docstring'''
    return int((input_a, input_b).count(0 ) != 0 )
def __magic_name__ ( ):
'''simple docstring'''
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
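# Added sketch (not part of the original): NAND is functionally complete, so the remaining basic gates
# can be derived from nand_gate above; the helper names below are illustrative additions.
def not_gate(input_a: int) -> int:
    return nand_gate(input_a, input_a)
def and_gate(input_a: int, input_b: int) -> int:
    return not_gate(nand_gate(input_a, input_b))
def or_gate(input_a: int, input_b: int) -> int:
    return nand_gate(not_gate(input_a), not_gate(input_b))
assert and_gate(1, 1) == 1 and and_gate(1, 0) == 0
assert or_gate(0, 0) == 0 and or_gate(0, 1) == 1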
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 703
|
from PIL import Image
def change_brightness ( img : Image , level : float ):
    '''simple docstring'''
    def brightness(c : int ) -> float:
        return 128 + level + (c - 128)
    if not -255.0 <= level <= 255.0:
        raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
    return img.point(brightness )
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
        brigt_img = change_brightness(img, 1_00)
brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
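# Added note (not in the original): 128 + level + (c - 128) simplifies to c + level, so the point map
# simply shifts every channel value by `level`; e.g. a pixel value of 50 with level 100 maps to 150.
assert 128 + 100 + (50 - 128) == 150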
| 86
| 0
|
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase = "▁"
__lowerCamelCase = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class _snake_case ( A__ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase__ =BigBirdTokenizer
UpperCamelCase__ =BigBirdTokenizerFast
UpperCamelCase__ =True
UpperCamelCase__ =True
def snake_case_ ( self : Union[str, Any] ):
super().setUp()
UpperCAmelCase_ :List[Any] = self.tokenizer_class(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case_ ( self : str ):
UpperCAmelCase_ :List[str] = """<s>"""
UpperCAmelCase_ :str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ )
def snake_case_ ( self : Any ):
UpperCAmelCase_ :Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''[MASK]''' )
self.assertEqual(len(lowerCamelCase__ ) , 1_004 )
def snake_case_ ( self : Optional[int] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def snake_case_ ( self : Optional[Any] ):
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ :Any = self.get_tokenizer()
UpperCAmelCase_ :str = self.get_rust_tokenizer()
UpperCAmelCase_ :List[Any] = """I was born in 92000, and this is falsé."""
UpperCAmelCase_ :List[str] = tokenizer.tokenize(lowerCamelCase__ )
UpperCAmelCase_ :Optional[int] = rust_tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
UpperCAmelCase_ :str = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
UpperCAmelCase_ :Union[str, Any] = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
UpperCAmelCase_ :Tuple = self.get_rust_tokenizer()
UpperCAmelCase_ :Any = tokenizer.encode(lowerCamelCase__ )
UpperCAmelCase_ :Tuple = rust_tokenizer.encode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case_ ( self : Optional[Any] ):
UpperCAmelCase_ :Dict = BigBirdTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
UpperCAmelCase_ :str = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCamelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [285, 46, 10, 170, 382] , )
UpperCAmelCase_ :List[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCamelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase_ :Tuple = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCAmelCase_ :Optional[int] = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def snake_case_ ( self : Dict ):
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def snake_case_ ( self : List[str] ):
UpperCAmelCase_ :Dict = """Hello World!"""
UpperCAmelCase_ :Any = [65, 18_536, 2_260, 101, 66]
self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) )
@slow
def snake_case_ ( self : str ):
UpperCAmelCase_ :Optional[Any] = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
# fmt: off
UpperCAmelCase_ :Any = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66] # noqa: E231
# fmt: on
self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) )
@require_torch
@slow
def snake_case_ ( self : str ):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
UpperCAmelCase_ :Optional[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10]
UpperCAmelCase_ :Optional[Any] = """ """.join(lowerCamelCase__ )
UpperCAmelCase_ :Optional[int] = self.big_tokenizer.encode_plus(lowerCamelCase__ , return_tensors='''pt''' , return_token_type_ids=lowerCamelCase__ )
UpperCAmelCase_ :List[Any] = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=lowerCamelCase__ )
UpperCAmelCase_ :Optional[int] = BigBirdConfig(attention_type='''original_full''' )
UpperCAmelCase_ :List[str] = BigBirdModel(lowerCamelCase__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowerCamelCase__ )
model(**lowerCamelCase__ )
@slow
def snake_case_ ( self : List[str] ):
UpperCAmelCase_ :Any = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
UpperCAmelCase_ :Any = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def snake_case_ ( self : Union[str, Any] ):
# fmt: off
UpperCAmelCase_ :int = {"""input_ids""": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 608
|
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def A ( lowercase__ : Optional[int] ) -> Optional[Any]:
UpperCamelCase__ :Union[str, Any] = {}
UpperCamelCase__ :Optional[int] = tokenizer(example["""content"""] , truncation=lowercase__ )["""input_ids"""]
UpperCamelCase__ :int = len(example["""content"""] ) / len(output["""input_ids"""] )
return output
UpperCamelCase = HfArgumentParser(PretokenizationArguments)
UpperCamelCase = parser.parse_args()
if args.num_workers is None:
UpperCamelCase = multiprocessing.cpu_count()
UpperCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCamelCase = time.time()
UpperCamelCase = load_dataset(args.dataset_name, split="train")
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
UpperCamelCase = time.time()
UpperCamelCase = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
UpperCamelCase = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 45
| 0
|
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowercase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = ["""pixel_values"""]
def __init__( self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = IMAGENET_DEFAULT_MEAN , lowercase = IMAGENET_DEFAULT_STD , **lowercase , ):
super().__init__(**__UpperCamelCase )
_lowerCamelCase : Dict = size if size is not None else {'shortest_edge': 224}
_lowerCamelCase : List[str] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_lowerCamelCase : Union[str, Any] = crop_size if crop_size is not None else {'height': 224, 'width': 224}
_lowerCamelCase : Any = get_size_dict(__UpperCamelCase , param_name='crop_size' )
_lowerCamelCase : Optional[Any] = do_resize
_lowerCamelCase : Optional[Any] = size
_lowerCamelCase : int = resample
_lowerCamelCase : Union[str, Any] = do_center_crop
_lowerCamelCase : Any = crop_size
_lowerCamelCase : List[str] = do_rescale
_lowerCamelCase : Tuple = rescale_factor
_lowerCamelCase : Optional[int] = do_normalize
_lowerCamelCase : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_lowerCamelCase : Dict = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def A_ ( self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ):
_lowerCamelCase : Union[str, Any] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
_lowerCamelCase : Optional[Any] = int((256 / 224) * size['shortest_edge'] )
_lowerCamelCase : Optional[Any] = get_resize_output_image_size(__UpperCamelCase , size=__UpperCamelCase , default_to_square=__UpperCamelCase )
_lowerCamelCase : List[Any] = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' )
return resize(
__UpperCamelCase , size=(size_dict['height'], size_dict['width']) , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def A_ ( self , lowercase , lowercase , lowercase = None , **lowercase , ):
_lowerCamelCase : Dict = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size dict must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(__UpperCamelCase , size=(size['height'], size['width']) , data_format=__UpperCamelCase , **__UpperCamelCase )
def A_ ( self , lowercase , lowercase , lowercase = None , **lowercase , ):
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def A_ ( self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ):
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
_lowerCamelCase : Dict = do_resize if do_resize is not None else self.do_resize
_lowerCamelCase : Any = resample if resample is not None else self.resample
_lowerCamelCase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCamelCase : List[str] = do_rescale if do_rescale is not None else self.do_rescale
_lowerCamelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCamelCase : Any = do_normalize if do_normalize is not None else self.do_normalize
_lowerCamelCase : int = image_mean if image_mean is not None else self.image_mean
_lowerCamelCase : Optional[Any] = image_std if image_std is not None else self.image_std
_lowerCamelCase : List[Any] = size if size is not None else self.size
_lowerCamelCase : Dict = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_lowerCamelCase : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
_lowerCamelCase : Optional[int] = get_size_dict(__UpperCamelCase , param_name='crop_size' )
_lowerCamelCase : Union[str, Any] = make_list_of_images(__UpperCamelCase )
if not valid_images(__UpperCamelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
_lowerCamelCase : List[Any] = [to_numpy_array(__UpperCamelCase ) for image in images]
if do_resize:
_lowerCamelCase : Any = [self.resize(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) for image in images]
if do_center_crop:
_lowerCamelCase : Union[str, Any] = [self.center_crop(__UpperCamelCase , __UpperCamelCase ) for image in images]
if do_rescale:
_lowerCamelCase : Union[str, Any] = [self.rescale(__UpperCamelCase , __UpperCamelCase ) for image in images]
if do_normalize:
_lowerCamelCase : Dict = [self.normalize(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) for image in images]
_lowerCamelCase : Optional[Any] = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images]
_lowerCamelCase : Tuple = {'pixel_values': images}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
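# Added usage sketch (an assumption, not part of the original file): processors of this kind are
# normally loaded through AutoImageProcessor and called on images; the checkpoint name is illustrative.
if __name__ == "__main__":
    from transformers import AutoImageProcessor

    example_processor = AutoImageProcessor.from_pretrained("facebook/levit-128S")
    dummy_image = np.zeros((256, 256, 3), dtype=np.uint8)  # HWC uint8 stand-in for a real photo
    example_inputs = example_processor(images=dummy_image, return_tensors="np")
    print(example_inputs["pixel_values"].shape)  # expected (1, 3, 224, 224) after resize + center crop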
| 702
|
"""simple docstring"""
def _snake_case ( lowercase__ ):
return "".join(chr(ord(lowercase__ ) - 32 ) if 'a' <= char <= 'z' else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
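# Added illustration (not in the original): lowercase and uppercase ASCII letters differ by exactly 32,
# e.g. ord('a') == 97 while ord('A') == 65, which is why subtracting 32 upper-cases a letter.
assert ord("a") - ord("A") == 32
assert chr(ord("z") - 32) == "Z"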
| 492
| 0
|
'''simple docstring'''
def solution ( n : int = 100 ):
    """simple docstring"""
    sum_of_squares : int = 0
    sum_of_ints : int = 0
    for i in range(1 , n + 1 ):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
| 577
|
'''simple docstring'''
def selection_sort ( collection : list ):
    """simple docstring"""
    length : int = len(collection )
    for i in range(length - 1 ):
        least = i
        for k in range(i + 1 , length ):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
__snake_case: Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
__snake_case: Any = [int(item) for item in user_input.split(",")]
print(selection_sort(unsorted))
| 577
| 1
|
'''simple docstring'''
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = FlaxAutoModelForSeqaSeqLM.from_config(config=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = """wi_0""" in tax_model["""target"""]["""encoder"""]["""layers_0"""]["""mlp"""]
if config.model_type == "t5":
_SCREAMING_SNAKE_CASE = """SelfAttention"""
if config.model_type == "longt5" and config.encoder_attention_type == "local":
_SCREAMING_SNAKE_CASE = """LocalSelfAttention"""
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_SCREAMING_SNAKE_CASE = """TransientGlobalSelfAttention"""
else:
raise ValueError(
"""Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"""
""" attribute with a value from ['local', 'transient-global].""" )
# Encoder
for layer_index in range(config.num_layers ):
_SCREAMING_SNAKE_CASE = F"layers_{str(SCREAMING_SNAKE_CASE_ )}"
# Self-Attention
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""key"""]["""kernel"""]
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""out"""]["""kernel"""]
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""query"""]["""kernel"""]
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""value"""]["""kernel"""]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""T5LayerNorm_0"""]["""scale"""]
# Layer Normalization
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""pre_attention_layer_norm"""]["""scale"""]
if split_mlp_wi:
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
_SCREAMING_SNAKE_CASE = flax_model.params["""encoder"""]["""block"""][str(SCREAMING_SNAKE_CASE_ )]["""layer"""]
_SCREAMING_SNAKE_CASE = tax_attention_key
_SCREAMING_SNAKE_CASE = tax_attention_out
_SCREAMING_SNAKE_CASE = tax_attention_query
_SCREAMING_SNAKE_CASE = tax_attention_value
_SCREAMING_SNAKE_CASE = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_SCREAMING_SNAKE_CASE = tax_global_layer_norm
if split_mlp_wi:
_SCREAMING_SNAKE_CASE = tax_mlp_wi_a
_SCREAMING_SNAKE_CASE = tax_mlp_wi_a
else:
_SCREAMING_SNAKE_CASE = tax_mlp_wi
_SCREAMING_SNAKE_CASE = tax_mlp_wo
_SCREAMING_SNAKE_CASE = tax_mlp_layer_norm
_SCREAMING_SNAKE_CASE = flax_model_encoder_layer_block
# Only for layer 0:
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""]["""relpos_bias"""]["""rel_embedding"""].T
_SCREAMING_SNAKE_CASE = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""]["""side_relpos_bias"""]["""rel_embedding"""].T
_SCREAMING_SNAKE_CASE = tax_encoder_global_rel_embedding
# Assigning
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""encoder"""]["""encoder_norm"""]["""scale"""]
_SCREAMING_SNAKE_CASE = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
_SCREAMING_SNAKE_CASE = F"layers_{str(SCREAMING_SNAKE_CASE_ )}"
# Self-Attention
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""key"""]["""kernel"""]
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""out"""]["""kernel"""]
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""query"""]["""kernel"""]
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""value"""]["""kernel"""]
# Layer Normalization
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""pre_self_attention_layer_norm"""][
"""scale"""
]
# Encoder-Decoder-Attention
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""encoder_decoder_attention"""]
_SCREAMING_SNAKE_CASE = tax_enc_dec_attention_module["""key"""]["""kernel"""]
_SCREAMING_SNAKE_CASE = tax_enc_dec_attention_module["""out"""]["""kernel"""]
_SCREAMING_SNAKE_CASE = tax_enc_dec_attention_module["""query"""]["""kernel"""]
_SCREAMING_SNAKE_CASE = tax_enc_dec_attention_module["""value"""]["""kernel"""]
# Layer Normalization
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""pre_cross_attention_layer_norm"""]["""scale"""]
# MLP
if split_mlp_wi:
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
_SCREAMING_SNAKE_CASE = flax_model.params["""decoder"""]["""block"""][str(SCREAMING_SNAKE_CASE_ )]["""layer"""]
_SCREAMING_SNAKE_CASE = tax_attention_key
_SCREAMING_SNAKE_CASE = tax_attention_out
_SCREAMING_SNAKE_CASE = tax_attention_query
_SCREAMING_SNAKE_CASE = tax_attention_value
_SCREAMING_SNAKE_CASE = tax_pre_attention_layer_norm
_SCREAMING_SNAKE_CASE = tax_enc_dec_attention_key
_SCREAMING_SNAKE_CASE = tax_enc_dec_attention_out
_SCREAMING_SNAKE_CASE = tax_enc_dec_attention_query
_SCREAMING_SNAKE_CASE = tax_enc_dec_attention_value
_SCREAMING_SNAKE_CASE = tax_cross_layer_norm
if split_mlp_wi:
_SCREAMING_SNAKE_CASE = tax_mlp_wi_a
_SCREAMING_SNAKE_CASE = tax_mlp_wi_a
else:
_SCREAMING_SNAKE_CASE = tax_mlp_wi
_SCREAMING_SNAKE_CASE = tax_mlp_wo
_SCREAMING_SNAKE_CASE = txa_mlp_layer_norm
_SCREAMING_SNAKE_CASE = flax_model_decoder_layer_block
# Decoder Normalization
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""]["""decoder_norm"""]["""scale"""]
_SCREAMING_SNAKE_CASE = txa_decoder_norm
# Only for layer 0:
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""]["""relpos_bias"""]["""rel_embedding"""].T
_SCREAMING_SNAKE_CASE = tax_decoder_rel_embedding
# Token Embeddings
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""token_embedder"""]["""embedding"""]
_SCREAMING_SNAKE_CASE = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
_SCREAMING_SNAKE_CASE = tax_model["""target"""]["""decoder"""]["""logits_dense"""]["""kernel"""]
flax_model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print("""T5X Model was sucessfully converted!""" )
if __name__ == "__main__":
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
)
parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
parser.add_argument(
"--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
)
UpperCamelCase__ : int = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 0
|
'''simple docstring'''
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
UpperCamelCase__ : Tuple = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _a (_lowerCamelCase):
"""simple docstring"""
def __init__( self , *A__ , A__=None , A__=None , A__=None , **A__ ) -> Optional[int]:
super().__init__(*A__ , **A__ )
_SCREAMING_SNAKE_CASE = eval_examples
_SCREAMING_SNAKE_CASE = post_process_function
_SCREAMING_SNAKE_CASE = quant_trainer_args
        _SCREAMING_SNAKE_CASE = 128 # default number of calibration samples
def UpperCamelCase ( self , A__=None ) -> Union[str, Any]:
if calib_dataset is None and self.calib_dataset is None:
raise ValueError("""Trainer: calibration requires an calib_dataset.""" )
_SCREAMING_SNAKE_CASE = calib_dataset if calib_dataset is not None else self.calib_dataset
_SCREAMING_SNAKE_CASE = self._remove_unused_columns(A__ , description="""Calibration""" )
return DataLoader(
A__ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=A__ , )
def UpperCamelCase ( self , A__=None ) -> str:
_SCREAMING_SNAKE_CASE = self.train_dataset if calib_dataset is None else calib_dataset
_SCREAMING_SNAKE_CASE = self.get_calib_dataloader(A__ )
_SCREAMING_SNAKE_CASE = self.model
quant_trainer.configure_model(A__ , self.quant_trainer_args , calib=A__ )
model.eval()
quant_trainer.enable_calibration(A__ )
logger.info("""***** Running calibration *****""" )
logger.info(F" Num examples = {self.calib_num}" )
logger.info(F" Batch size = {calib_dataloader.batch_size}" )
for step, inputs in enumerate(A__ ):
# Prediction step
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.prediction_step(A__ , A__ , prediction_loss_only=A__ )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(A__ , self.quant_trainer_args )
_SCREAMING_SNAKE_CASE = model
def UpperCamelCase ( self , A__=None , A__=None , A__=None , A__ = "eval" ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.eval_dataset if eval_dataset is None else eval_dataset
_SCREAMING_SNAKE_CASE = self.get_eval_dataloader(A__ )
_SCREAMING_SNAKE_CASE = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_SCREAMING_SNAKE_CASE = self.compute_metrics
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_SCREAMING_SNAKE_CASE = eval_loop(
A__ , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A__ , )
finally:
_SCREAMING_SNAKE_CASE = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
_SCREAMING_SNAKE_CASE = self.post_process_function(A__ , A__ , output.predictions )
_SCREAMING_SNAKE_CASE = self.compute_metrics(A__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
_SCREAMING_SNAKE_CASE = metrics.pop(A__ )
self.log(A__ )
else:
_SCREAMING_SNAKE_CASE = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_SCREAMING_SNAKE_CASE = self.callback_handler.on_evaluate(self.args , self.state , self.control , A__ )
return metrics
def UpperCamelCase ( self , A__ , A__ , A__=None , A__ = "test" ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.get_test_dataloader(A__ )
# Temporarily disable metric computation, we will do it in the loop here.
_SCREAMING_SNAKE_CASE = self.compute_metrics
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_SCREAMING_SNAKE_CASE = eval_loop(
A__ , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A__ , )
finally:
_SCREAMING_SNAKE_CASE = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
_SCREAMING_SNAKE_CASE = self.post_process_function(A__ , A__ , output.predictions , """predict""" )
_SCREAMING_SNAKE_CASE = self.compute_metrics(A__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
_SCREAMING_SNAKE_CASE = metrics.pop(A__ )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=A__ )
def UpperCamelCase ( self , A__="./" ) -> Tuple:
_SCREAMING_SNAKE_CASE = self.eval_dataset
_SCREAMING_SNAKE_CASE = self.get_eval_dataloader(A__ )
_SCREAMING_SNAKE_CASE = next(iter(A__ ) )
# saving device - to make it consistent
_SCREAMING_SNAKE_CASE = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
# convert to tuple
_SCREAMING_SNAKE_CASE = tuple(v.to(A__ ) for k, v in batch.items() )
logger.info("""Converting model to be onnx compatible""" )
from pytorch_quantization.nn import TensorQuantizer
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = self.model.to(A__ )
model.eval()
model.float()
_SCREAMING_SNAKE_CASE = model.module if hasattr(A__ , """module""" ) else model
quant_trainer.configure_model(A__ , self.quant_trainer_args )
_SCREAMING_SNAKE_CASE = os.path.join(A__ , """model.onnx""" )
logger.info(F"exporting model to {output_model_file}" )
_SCREAMING_SNAKE_CASE = {0: """batch_size""", 1: """seq_len"""}
torch.onnx.export(
A__ , A__ , A__ , export_params=A__ , opset_version=13 , do_constant_folding=A__ , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={
"""input_ids""": axes,
"""attention_mask""": axes,
"""token_type_ids""": axes,
"""output_start_logits""": axes,
"""output_end_logits""": axes,
} , verbose=A__ , )
logger.info("""onnx export finished""" )
| 0
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCamelCase = logging.get_logger(__name__)
class UpperCamelCase__ ( _lowerCAmelCase ):
"""simple docstring"""
A__ : Dict = ["pixel_values"]
def __init__( self , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = 1 / 255 , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = True , **SCREAMING_SNAKE_CASE__ , ) -> None:
super().__init__(**SCREAMING_SNAKE_CASE__ )
A__ = size if size is not None else {"height": 384, "width": 384}
A__ = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
A__ = do_resize
A__ = size
A__ = resample
A__ = do_rescale
A__ = rescale_factor
A__ = do_normalize
A__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A__ = image_std if image_std is not None else OPENAI_CLIP_STD
A__ = do_convert_rgb
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> np.ndarray:
A__ = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
A__ = (size["height"], size["width"])
return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> str:
return rescale(SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> np.ndarray:
return normalize(SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ , ) -> PIL.Image.Image:
A__ = do_resize if do_resize is not None else self.do_resize
A__ = resample if resample is not None else self.resample
A__ = do_rescale if do_rescale is not None else self.do_rescale
A__ = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = image_mean if image_mean is not None else self.image_mean
A__ = image_std if image_std is not None else self.image_std
A__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A__ = size if size is not None else self.size
A__ = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
A__ = make_list_of_images(SCREAMING_SNAKE_CASE__ )
if not valid_images(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A__ = [convert_to_rgb(SCREAMING_SNAKE_CASE__ ) for image in images]
# All transformations expect numpy arrays.
A__ = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images]
if do_resize:
A__ = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images]
if do_rescale:
A__ = [self.rescale(image=SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ ) for image in images]
if do_normalize:
A__ = [self.normalize(image=SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ ) for image in images]
A__ = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images]
A__ = BatchFeature(data={"pixel_values": images} , tensor_type=SCREAMING_SNAKE_CASE__ )
return encoded_outputs
| 104
|
"""simple docstring"""
import random
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = False ) -> dict:
'''simple docstring'''
lowerCamelCase__ ={i: [] for i in range(__lowerCAmelCase )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(__lowerCAmelCase )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
# for each couple of nodes, add an edge from u to v
# if the number randomly generated is greater than probability probability
for i in range(__lowerCAmelCase ):
for j in range(i + 1 , __lowerCAmelCase ):
if random.random() < probability:
graph[i].append(__lowerCAmelCase )
if not directed:
# if the graph is undirected, add an edge in from j to i, either
graph[j].append(__lowerCAmelCase )
return graph
def lowerCamelCase_ ( __lowerCAmelCase ) -> dict:
'''simple docstring'''
return {
i: [j for j in range(__lowerCAmelCase ) if i != j] for i in range(__lowerCAmelCase )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
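# Added sanity-check note (not in the original): for an undirected G(n, p) graph of this kind the
# expected number of edges is p * n * (n - 1) / 2, e.g. n = 10 and p = 0.5 gives 22.5 on average.
assert 0.5 * 10 * (10 - 1) / 2 == 22.5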
| 530
| 0
|
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
__snake_case = data_utils.TransfoXLTokenizer
__snake_case = data_utils.TransfoXLCorpus
__snake_case = data_utils
__snake_case = data_utils
def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->Any:
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(SCREAMING_SNAKE_CASE_ , """rb""" ) as fp:
lowercase_ = pickle.load(SCREAMING_SNAKE_CASE_ , encoding="""latin1""" )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
lowercase_ = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""pretrained_vocab_file"""]
print(f"""Save vocabulary to {pytorch_vocab_dump_path}""" )
lowercase_ = corpus.vocab.__dict__
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = corpus.__dict__
corpus_dict_no_vocab.pop("""vocab""" , SCREAMING_SNAKE_CASE_ )
lowercase_ = pytorch_dump_folder_path + """/""" + CORPUS_NAME
print(f"""Save dataset to {pytorch_dataset_dump_path}""" )
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
lowercase_ = os.path.abspath(SCREAMING_SNAKE_CASE_ )
lowercase_ = os.path.abspath(SCREAMING_SNAKE_CASE_ )
print(f"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" )
# Initialise PyTorch model
if transfo_xl_config_file == "":
lowercase_ = TransfoXLConfig()
else:
lowercase_ = TransfoXLConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
print(f"""Building PyTorch model from configuration: {config}""" )
lowercase_ = TransfoXLLMHeadModel(SCREAMING_SNAKE_CASE_ )
lowercase_ = load_tf_weights_in_transfo_xl(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save pytorch-model
lowercase_ = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
print(f"""Save PyTorch model to {os.path.abspath(SCREAMING_SNAKE_CASE_ )}""" )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE_ )
print(f"""Save configuration file to {os.path.abspath(SCREAMING_SNAKE_CASE_ )}""" )
with open(SCREAMING_SNAKE_CASE_ , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--tf_checkpoint_path""",
default="""""",
type=str,
help="""An optional path to a TensorFlow checkpoint path to be converted.""",
)
parser.add_argument(
"""--transfo_xl_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--transfo_xl_dataset_file""",
default="""""",
type=str,
help="""An optional dataset file to be converted in a vocabulary.""",
)
__snake_case = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 603
|
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
__snake_case = re.compile(r"""([A-Z]+)([A-Z][a-z])""")
__snake_case = re.compile(r"""([a-z\d])([A-Z])""")
__snake_case = re.compile(r"""(?<!_)_(?!_)""")
__snake_case = re.compile(r"""(_{2,})""")
__snake_case = r"""^\w+(\.\w+)*$"""
__snake_case = r"""<>:/\|?*"""
def A_ ( SCREAMING_SNAKE_CASE_ ) ->Union[str, Any]:
lowercase_ = _uppercase_uppercase_re.sub(r"""\1_\2""" , SCREAMING_SNAKE_CASE_ )
lowercase_ = _lowercase_uppercase_re.sub(r"""\1_\2""" , SCREAMING_SNAKE_CASE_ )
return name.lower()
def A_ ( SCREAMING_SNAKE_CASE_ ) ->List[Any]:
lowercase_ = _single_underscore_re.split(SCREAMING_SNAKE_CASE_ )
lowercase_ = [_multiple_underscores_re.split(SCREAMING_SNAKE_CASE_ ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(SCREAMING_SNAKE_CASE_ ) if n != """""" )
def A_ ( SCREAMING_SNAKE_CASE_ ) ->Any:
if os.path.basename(SCREAMING_SNAKE_CASE_ ) != name:
raise ValueError(f"""Should be a dataset name, not a path: {name}""" )
return camelcase_to_snakecase(SCREAMING_SNAKE_CASE_ )
def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->Any:
if os.path.basename(SCREAMING_SNAKE_CASE_ ) != name:
raise ValueError(f"""Should be a dataset name, not a path: {name}""" )
if not re.match(_split_re , SCREAMING_SNAKE_CASE_ ):
raise ValueError(f"""Split name should match '{_split_re}'' but got '{split}'.""" )
return f"""{filename_prefix_for_name(SCREAMING_SNAKE_CASE_ )}-{split}"""
def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) ->Tuple:
lowercase_ = filename_prefix_for_split(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if filetype_suffix:
prefix += f""".{filetype_suffix}"""
lowercase_ = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return f"""{filepath}*"""
def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None ) ->Optional[Any]:
lowercase_ = filename_prefix_for_split(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if shard_lengths:
lowercase_ = len(SCREAMING_SNAKE_CASE_ )
lowercase_ = [f"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(SCREAMING_SNAKE_CASE_ )]
if filetype_suffix:
lowercase_ = [filename + f""".{filetype_suffix}""" for filename in filenames]
return filenames
else:
lowercase_ = prefix
if filetype_suffix:
filename += f""".{filetype_suffix}"""
return [filename]
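# Added worked example (not in the original): the regexes above split uppercase runs and
# lowercase/digit-to-uppercase boundaries before lowering, e.g. "SquadV2" -> "Squad_V2" -> "squad_v2".
assert re.sub(r"([a-z\d])([A-Z])", r"\1_\2", "SquadV2").lower() == "squad_v2"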
| 603
| 1
|
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
a_ : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
a_ : list[int] = [ord(letter) for letter in string.ascii_lowercase]
a_ : set[int] = {ord(char) for char in VALID_CHARS}
a_ : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def a_ ( __snake_case : list[int] , __snake_case : tuple[int, ...] ) -> str | None:
"""simple docstring"""
lowerCamelCase_ =""
lowerCamelCase_ =42
lowerCamelCase_ =42
lowerCamelCase_ =42
for keychar, cipherchar in zip(cycle(__snake_case ) , __snake_case ):
lowerCamelCase_ =cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(__snake_case )
return decoded
def a_ ( __snake_case : list[int] ) -> list[str]:
"""simple docstring"""
lowerCamelCase_ =[]
for key in product(__snake_case , repeat=3 ):
lowerCamelCase_ =try_key(__snake_case , __snake_case )
if encoded is not None:
possibles.append(__snake_case )
return possibles
def a_ ( __snake_case : list[str] , __snake_case : str ) -> list[str]:
"""simple docstring"""
return [possible for possible in possibles if common_word in possible.lower()]
def a_ ( __snake_case : str = "p059_cipher.txt" ) -> int:
"""simple docstring"""
lowerCamelCase_ =42
lowerCamelCase_ =42
lowerCamelCase_ =42
lowerCamelCase_ =42
lowerCamelCase_ =Path(__snake_case ).parent.joinpath(__snake_case ).read_text(encoding='''utf-8''' )
lowerCamelCase_ =[int(__snake_case ) for number in data.strip().split(''',''' )]
lowerCamelCase_ =filter_valid_chars(__snake_case )
for common_word in COMMON_WORDS:
lowerCamelCase_ =filter_common_word(__snake_case , __snake_case )
if len(__snake_case ) == 1:
break
lowerCamelCase_ =possibles[0]
return sum(ord(__snake_case ) for char in decoded_text )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 676
|
def jaccard_similarity ( set_a , set_b , alternative_union=False ):
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union
    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )
    return None
if __name__ == "__main__":
    set_a = {'''a''', '''b''', '''c''', '''d''', '''e'''}
    set_b = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
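    # Added worked example (not in the original): the intersection {'c', 'd', 'e'} has 3 elements and
    # the union has 8, so the standard Jaccard similarity of set_a and set_b is 3 / 8 = 0.375.
    assert len(set_a & set_b) / len(set_a | set_b) == 0.375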
| 305
| 0
|
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
_A : List[Any] = """facebook/wmt19-en-de"""
_A : Any = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
_A : Union[str, Any] = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
_A : int = FSMTForConditionalGeneration(config)
print(F"num of params {tiny_model.num_parameters()}")
# Test
_A : List[Any] = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
_A : Dict = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
_A : Union[str, Any] = """tiny-wmt19-en-de"""
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 518
|
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class a__ ( unittest.TestCase ):
@slow
def __magic_name__ ( self ):
lowercase : List[str] = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
lowercase : Any = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
model.to(_a )
from datasets import load_dataset
lowercase : Any = load_dataset("nielsr/rvlcdip-demo" )
lowercase : List[str] = dataset["train"][0]["image"].convert("RGB" )
lowercase : str = image_processor(_a , return_tensors="pt" ).to(_a )
# forward pass
with torch.no_grad():
lowercase : Tuple = model(**_a )
lowercase : Dict = outputs.logits
lowercase : Union[str, Any] = torch.Size((1, 16) )
self.assertEqual(logits.shape , _a )
lowercase : int = torch.tensor(
[-0.4_1_5_8, -0.4_0_9_2, -0.4_3_4_7] , device=_a , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , _a , atol=1E-4 ) )
| 518
| 1
|
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def UpperCamelCase ( lowercase_ , lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
assert isinstance(lowercase_ , lowercase_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[Any]:
'''simple docstring'''
lowercase__ : Tuple = tmp_path / """cache"""
lowercase__ : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowercase__ : Optional[int] = SqlDatasetReader(
"""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=lowercase_ , keep_in_memory=lowercase_ ).read()
_check_sql_dataset(lowercase_ , lowercase_ )
@require_sqlalchemy
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
lowercase__ : Optional[int] = tmp_path / """cache"""
lowercase__ : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
lowercase__ : Optional[int] = features.copy() if features else default_expected_features
lowercase__ : str = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
lowercase__ : Tuple = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , features=lowercase_ , cache_dir=lowercase_ ).read()
_check_sql_dataset(lowercase_ , lowercase_ )
def UpperCamelCase ( lowercase_ ) -> int:
'''simple docstring'''
    with contextlib.closing(sqlite3.connect(lowercase_ ) ) as con:
lowercase__ : str = con.cursor()
cur.execute("""SELECT * FROM dataset""" )
for row in cur:
yield row
@require_sqlalchemy
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
lowercase__ : str = tmp_path / """cache"""
lowercase__ : Optional[Any] = os.path.join(lowercase_ , """tmp.sql""" )
lowercase__ : Union[str, Any] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=lowercase_ ).read()
SqlDatasetWriter(lowercase_ , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=1 ).write()
lowercase__ : Optional[Any] = iter_sql_file(lowercase_ )
lowercase__ : int = iter_sql_file(lowercase_ )
    for row1, row2 in zip(iter_sql_file(sqlite_path ) , iter_sql_file(output_sqlite_path ) ):
        assert row1 == row2
@require_sqlalchemy
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
lowercase__ : List[str] = tmp_path / """cache"""
lowercase__ : str = os.path.join(lowercase_ , """tmp.sql""" )
lowercase__ : List[str] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=lowercase_ ).read()
SqlDatasetWriter(lowercase_ , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=2 ).write()
lowercase__ : List[Any] = iter_sql_file(lowercase_ )
lowercase__ : Tuple = iter_sql_file(lowercase_ )
    for row1, row2 in zip(iter_sql_file(sqlite_path ) , iter_sql_file(output_sqlite_path ) ):
        assert row1 == row2
@require_sqlalchemy
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Optional[Any]:
'''simple docstring'''
lowercase__ : Tuple = tmp_path / """cache"""
lowercase__ : Union[str, Any] = os.path.join(lowercase_ , """tmp.sql""" )
lowercase__ : Dict = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=lowercase_ ).read()
with pytest.raises(lowercase_ ):
SqlDatasetWriter(lowercase_ , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=0 ).write()
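# Added illustration (not one of the original tests): the same SQLite round trip through
# the public Dataset API; `Dataset.to_sql` / `Dataset.from_sql` are assumed to be available
# in the installed `datasets` version.
def _example_sql_round_trip(tmp_sqlite_path):
    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
    ds.to_sql("dataset", "sqlite:///" + tmp_sqlite_path)
    return Dataset.from_sql("dataset", "sqlite:///" + tmp_sqlite_path)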
| 12
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def lowercase__( _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : PreTrainedTokenizer , _UpperCamelCase : int , _UpperCamelCase : Optional[int] = None , )-> List[Any]:
"""simple docstring"""
_UpperCamelCase = {}
if train_file is not None:
_UpperCamelCase = [train_file]
if eval_file is not None:
_UpperCamelCase = [eval_file]
if test_file is not None:
_UpperCamelCase = [test_file]
_UpperCamelCase = datasets.load_dataset("csv" , data_files=_UpperCamelCase )
_UpperCamelCase = list(ds[list(files.keys() )[0]].features.keys() )
_UpperCamelCase = features_name.pop(_UpperCamelCase )
_UpperCamelCase = list(set(ds[list(files.keys() )[0]][label_name] ) )
_UpperCamelCase = {label: i for i, label in enumerate(_UpperCamelCase )}
_UpperCamelCase = tokenizer.model_input_names
_UpperCamelCase = {}
if len(_UpperCamelCase ) == 1:
for k in files.keys():
_UpperCamelCase = ds[k].map(
lambda _UpperCamelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=_UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" ) , batched=_UpperCamelCase , )
elif len(_UpperCamelCase ) == 2:
for k in files.keys():
_UpperCamelCase = ds[k].map(
lambda _UpperCamelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=_UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" , ) , batched=_UpperCamelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
_UpperCamelCase = {k: v for k, v in ex.items() if k in input_names}
_UpperCamelCase = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
_UpperCamelCase = {k: v for k, v in ex.items() if k in input_names}
_UpperCamelCase = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
_UpperCamelCase = {k: v for k, v in ex.items() if k in input_names}
_UpperCamelCase = labelaid[ex[label_name]]
yield (d, label)
_UpperCamelCase = (
tf.data.Dataset.from_generator(
            _UpperCamelCase , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
_UpperCamelCase = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
_UpperCamelCase = (
tf.data.Dataset.from_generator(
            _UpperCamelCase , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
_UpperCamelCase = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
_UpperCamelCase = (
tf.data.Dataset.from_generator(
            _UpperCamelCase , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
_UpperCamelCase = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
snake_case_ : Tuple = logging.getLogger(__name__)
@dataclass
class A_ :
'''simple docstring'''
_lowerCAmelCase = field(metadata={"""help""": """Which column contains the label"""} )
_lowerCAmelCase = field(default=lowerCAmelCase_ , metadata={"""help""": """The path of the training file"""} )
_lowerCAmelCase = field(default=lowerCAmelCase_ , metadata={"""help""": """The path of the development file"""} )
_lowerCAmelCase = field(default=lowerCAmelCase_ , metadata={"""help""": """The path of the test file"""} )
_lowerCAmelCase = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_lowerCAmelCase = field(
default=lowerCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
@dataclass
class A_ :
'''simple docstring'''
_lowerCAmelCase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_lowerCAmelCase = field(
default=lowerCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_lowerCAmelCase = field(
default=lowerCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_lowerCAmelCase = field(default=lowerCAmelCase_ , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_lowerCAmelCase = field(
default=lowerCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
def lowercase__( )-> List[Any]:
"""simple docstring"""
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, "
        f"16-bits training: {training_args.fp16}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=_UpperCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
_UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(_UpperCamelCase ) , labelaid=_UpperCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
_UpperCamelCase = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=_UpperCamelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(_UpperCamelCase : EvalPrediction ) -> Dict:
_UpperCamelCase = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
_UpperCamelCase = TFTrainer(
model=_UpperCamelCase , args=_UpperCamelCase , train_dataset=_UpperCamelCase , eval_dataset=_UpperCamelCase , compute_metrics=_UpperCamelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_UpperCamelCase = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_UpperCamelCase = trainer.evaluate()
_UpperCamelCase = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(_UpperCamelCase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(f" {key} = {value}" )
writer.write(f"{key} = {value}\n" )
results.update(_UpperCamelCase )
return results
if __name__ == "__main__":
main()
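# Added usage note (illustrative): the example is meant to be launched from the command
# line, roughly like
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --label_column_id 0 \
#       --train_file train.csv --dev_file dev.csv --test_file test.csv \
#       --output_dir ./output --do_train --do_eval
# where each CSV provides the label column plus one or two text columns.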
| 138
| 0
|
"""simple docstring"""
class _lowercase :
def __init__( self : Union[str, Any] , a : str = "" , a : bool = False ):
"""simple docstring"""
__snake_case : dict[str, RadixNode] ={}
# A node will be a leaf if the tree contains its word
__snake_case : Dict =is_leaf
__snake_case : List[str] =prefix
def _UpperCamelCase ( self : Dict , a : str ):
"""simple docstring"""
__snake_case : Optional[Any] =0
for q, w in zip(self.prefix , a ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def _UpperCamelCase ( self : Tuple , a : list[str] ):
"""simple docstring"""
for word in words:
self.insert(a )
def _UpperCamelCase ( self : Optional[int] , a : str ):
"""simple docstring"""
if self.prefix == word:
__snake_case : Optional[Any] =True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
__snake_case : List[str] =RadixNode(prefix=a , is_leaf=a )
else:
__snake_case : int =self.nodes[word[0]]
            __snake_case , __snake_case , __snake_case = incoming_node.match(
a )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(a )
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
__snake_case : Union[str, Any] =remaining_prefix
__snake_case : Optional[int] =self.nodes[matching_string[0]]
__snake_case : Tuple =RadixNode(a , a )
__snake_case : Dict =aux_node
if remaining_word == "":
__snake_case : List[str] =True
else:
self.nodes[matching_string[0]].insert(a )
def _UpperCamelCase ( self : Optional[int] , a : str ):
"""simple docstring"""
__snake_case : Any =self.nodes.get(word[0] , a )
if not incoming_node:
return False
else:
            __snake_case , __snake_case , __snake_case = incoming_node.match(
a )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(a )
def _UpperCamelCase ( self : Union[str, Any] , a : str ):
"""simple docstring"""
__snake_case : List[str] =self.nodes.get(word[0] , a )
if not incoming_node:
return False
else:
            __snake_case , __snake_case , __snake_case = incoming_node.match(
a )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(a )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
__snake_case : Optional[Any] =list(self.nodes.values() )[0]
__snake_case : List[str] =merging_node.is_leaf
self.prefix += merging_node.prefix
__snake_case : str =merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
__snake_case : Any =False
# If there is 1 edge, we merge it with its child
else:
__snake_case : List[Any] =list(incoming_node.nodes.values() )[0]
__snake_case : str =merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
__snake_case : Any =merging_node.nodes
return True
def _UpperCamelCase ( self : int , a : int = 0 ):
"""simple docstring"""
if self.prefix != "":
print('''-''' * height , self.prefix , ''' (leaf)''' if self.is_leaf else '''''' )
for value in self.nodes.values():
value.print_tree(height + 1 )
def __lowercase ( ) -> bool:
__snake_case : Optional[Any] ='''banana bananas bandana band apple all beast'''.split()
__snake_case : List[Any] =RadixNode()
root.insert_many(a )
assert all(root.find(a ) for word in words )
assert not root.find('''bandanas''' )
assert not root.find('''apps''' )
root.delete('''all''' )
assert not root.find('''all''' )
root.delete('''banana''' )
assert not root.find('''banana''' )
assert root.find('''bananas''' )
return True
def __lowercase ( ) -> None:
assert test_trie()
def __lowercase ( ) -> None:
__snake_case : Optional[Any] =RadixNode()
__snake_case : Optional[int] ='''banana bananas bandanas bandana band apple all beast'''.split()
root.insert_many(a )
print('''Words:''' , a )
print('''Tree:''' )
root.print_tree()
if __name__ == "__main__":
main()
| 497
|
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class _lowercase :
def __init__( self : Optional[Any] , a : List[str] , a : List[Any]=1_3 , a : Any=7 , a : List[str]=True , a : int=True , a : Optional[int]=9_9 , a : List[Any]=3_2 , a : Optional[int]=5 , a : List[Any]=4 , a : Dict=3_7 , a : List[str]="gelu" , a : Union[str, Any]=0.1 , a : str=0.1 , a : List[str]=5_0 , a : Tuple=0.0_2 , a : Union[str, Any]=True , a : int=None , ):
"""simple docstring"""
__snake_case : Optional[int] =parent
__snake_case : Dict =batch_size
__snake_case : Union[str, Any] =seq_length
__snake_case : Optional[Any] =is_training
__snake_case : Any =use_input_mask
__snake_case : Union[str, Any] =vocab_size
__snake_case : Union[str, Any] =hidden_size
__snake_case : int =num_hidden_layers
__snake_case : Optional[int] =num_attention_heads
__snake_case : int =intermediate_size
__snake_case : str =hidden_act
__snake_case : str =hidden_dropout_prob
__snake_case : Tuple =attention_probs_dropout_prob
__snake_case : Tuple =max_position_embeddings
__snake_case : Optional[Any] =initializer_range
__snake_case : List[str] =use_labels
__snake_case : List[Any] =scope
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
__snake_case : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Optional[int] =None
if self.use_input_mask:
__snake_case : Optional[Any] =random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
__snake_case : int =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Any =self.get_config()
return config, input_ids, input_mask, token_labels
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=a , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
        __snake_case , __snake_case , __snake_case , __snake_case = self.prepare_config_and_inputs()
__snake_case : Dict =True
__snake_case : int =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__snake_case : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _UpperCamelCase ( self : Optional[Any] , a : Union[str, Any] , a : Optional[int] , a : int , a : Tuple , **a : Any , ):
"""simple docstring"""
__snake_case : List[Any] =BertGenerationEncoder(config=a )
model.to(a )
model.eval()
__snake_case : Dict =model(a , attention_mask=a )
__snake_case : Tuple =model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : Tuple , a : Union[str, Any] , a : Optional[Any] , a : str , a : List[Any] , a : Optional[int] , a : Optional[Any] , **a : int , ):
"""simple docstring"""
__snake_case : Any =True
__snake_case : List[str] =BertGenerationEncoder(config=a )
model.to(a )
model.eval()
__snake_case : Union[str, Any] =model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , )
__snake_case : str =model(
a , attention_mask=a , encoder_hidden_states=a , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : Any , a : List[str] , a : Dict , a : Optional[Any] , a : Tuple , a : Dict , a : Tuple , **a : Tuple , ):
"""simple docstring"""
__snake_case : List[Any] =True
__snake_case : Any =True
__snake_case : int =BertGenerationDecoder(config=a ).to(a ).eval()
# first forward pass
__snake_case : List[str] =model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , use_cache=a , )
__snake_case : Optional[int] =outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__snake_case : List[str] =ids_tensor((self.batch_size, 3) , config.vocab_size )
__snake_case : List[Any] =ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__snake_case : Tuple =torch.cat([input_ids, next_tokens] , dim=-1 )
__snake_case : List[Any] =torch.cat([input_mask, next_mask] , dim=-1 )
__snake_case : Tuple =model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , output_hidden_states=a , )['''hidden_states'''][0]
__snake_case : Any =model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , past_key_values=a , output_hidden_states=a , )['''hidden_states'''][0]
# select random slice
__snake_case : int =ids_tensor((1,) , output_from_past.shape[-1] ).item()
__snake_case : int =output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case : int =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1e-3 ) )
def _UpperCamelCase ( self : Optional[Any] , a : List[Any] , a : Dict , a : Optional[Any] , a : List[str] , *a : Dict , ):
"""simple docstring"""
__snake_case : List[str] =BertGenerationDecoder(a )
model.to(a )
model.eval()
__snake_case : Any =model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
        __snake_case , __snake_case , __snake_case , __snake_case = self.prepare_config_and_inputs()
__snake_case : List[Any] ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _lowercase ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_a : Union[str, Any] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
_a : List[Any] = (BertGenerationDecoder,) if is_torch_available() else ()
_a : Union[str, Any] = (
{'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder}
if is_torch_available()
else {}
)
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
__snake_case : int =BertGenerationEncoderTester(self )
__snake_case : Union[str, Any] =ConfigTester(self , config_class=a , hidden_size=3_7 )
def _UpperCamelCase ( self : str ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
        __snake_case , __snake_case , __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs()
__snake_case : int ='''bert'''
self.model_tester.create_and_check_model(a , a , a , a )
def _UpperCamelCase ( self : Dict ):
"""simple docstring"""
__snake_case : List[str] =self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*a )
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
__snake_case : Dict =self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*a )
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
        __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_decoder()
__snake_case : Tuple =None
self.model_tester.create_and_check_model_as_decoder(
a , a , a , a , a , a , )
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
__snake_case : Tuple =self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*a )
@slow
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
__snake_case : Optional[int] =BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
self.assertIsNotNone(a )
@require_torch
class _lowercase ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
__snake_case : List[Any] =BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
__snake_case : Any =torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] )
with torch.no_grad():
__snake_case : str =model(a )[0]
__snake_case : List[str] =torch.Size([1, 8, 1_0_2_4] )
self.assertEqual(output.shape , a )
__snake_case : int =torch.tensor(
[[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1e-4 ) )
@require_torch
class _lowercase ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : Any ):
"""simple docstring"""
__snake_case : int =BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
__snake_case : Dict =torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] )
with torch.no_grad():
__snake_case : List[str] =model(a )[0]
__snake_case : str =torch.Size([1, 8, 5_0_3_5_8] )
self.assertEqual(output.shape , a )
__snake_case : Optional[int] =torch.tensor(
[[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1e-4 ) )
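# Added usage sketch (illustrative): BertGeneration encoder/decoder checkpoints are
# typically combined into a single seq2seq model via EncoderDecoderModel, e.g.
#   from transformers import EncoderDecoderModel
#   ckpt = "google/bert_for_seq_generation_L-24_bbc_encoder"
#   seq2seq = EncoderDecoderModel.from_encoder_decoder_pretrained(ckpt, ckpt)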
| 497
| 1
|
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _a ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : str=None ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ = None
if token is not None:
lowerCAmelCase__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"}
lowerCAmelCase__ = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
lowerCAmelCase__ = requests.get(A_ , headers=A_ ).json()
lowerCAmelCase__ = {}
try:
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
lowerCAmelCase__ = math.ceil((result["total_count"] - 100) / 100 )
for i in range(A_ ):
lowerCAmelCase__ = requests.get(url + F"&page={i + 2}" , headers=A_ ).json()
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return job_links
except Exception:
print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def _a ( UpperCamelCase_ : str , UpperCamelCase_ : List[Any]=None ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ = None
if token is not None:
lowerCAmelCase__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"}
lowerCAmelCase__ = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
lowerCAmelCase__ = requests.get(A_ , headers=A_ ).json()
lowerCAmelCase__ = {}
try:
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
lowerCAmelCase__ = math.ceil((result["total_count"] - 100) / 100 )
for i in range(A_ ):
lowerCAmelCase__ = requests.get(url + F"&page={i + 2}" , headers=A_ ).json()
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
return artifacts
except Exception:
print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def _a ( UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : int , UpperCamelCase_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ = None
if token is not None:
lowerCAmelCase__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"}
lowerCAmelCase__ = requests.get(A_ , headers=A_ , allow_redirects=A_ )
lowerCAmelCase__ = result.headers["""Location"""]
lowerCAmelCase__ = requests.get(A_ , allow_redirects=A_ )
lowerCAmelCase__ = os.path.join(A_ , F"{artifact_name}.zip" )
with open(A_ , "wb" ) as fp:
fp.write(response.content )
def _a ( UpperCamelCase_ : int , UpperCamelCase_ : Optional[Any]=None ) -> str:
"""simple docstring"""
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = None
with zipfile.ZipFile(A_ ) as z:
for filename in z.namelist():
if not os.path.isdir(A_ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(A_ ) as f:
for line in f:
lowerCAmelCase__ = line.decode("UTF-8" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
lowerCAmelCase__ = line[: line.index(": " )]
lowerCAmelCase__ = line[line.index(": " ) + len(": " ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("FAILED " ):
# `test` is the test method that failed
lowerCAmelCase__ = line[len("FAILED " ) :]
failed_tests.append(A_ )
elif filename == "job_name.txt":
lowerCAmelCase__ = line
if len(A_ ) != len(A_ ):
raise ValueError(
F"`errors` and `failed_tests` should have the same number of elements. Got {len(A_ )} for `errors` "
F"and {len(A_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
" problem." )
lowerCAmelCase__ = None
if job_name and job_links:
lowerCAmelCase__ = job_links.get(A_ , A_ )
# A list with elements of the form (line of error, error, failed test)
lowerCAmelCase__ = [x + [y] + [job_link] for x, y in zip(A_ , A_ )]
return result
def _a ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int]=None ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ = []
lowerCAmelCase__ = [os.path.join(A_ , A_ ) for p in os.listdir(A_ ) if p.endswith(".zip" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(A_ , job_links=A_ ) )
return errors
def _a ( UpperCamelCase_ : Dict , UpperCamelCase_ : int=None ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ = Counter()
counter.update([x[1] for x in logs] )
lowerCAmelCase__ = counter.most_common()
lowerCAmelCase__ = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
lowerCAmelCase__ = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
lowerCAmelCase__ = dict(sorted(r.items() , key=lambda UpperCamelCase_ : item[1]["count"] , reverse=A_ ) )
return r
def _a ( UpperCamelCase_ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ = test.split("::" )[0]
if test.startswith("tests/models/" ):
lowerCAmelCase__ = test.split("/" )[2]
else:
lowerCAmelCase__ = None
return test
def _a ( UpperCamelCase_ : Any , UpperCamelCase_ : Tuple=None ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ = [(x[0], x[1], get_model(x[2] )) for x in logs]
lowerCAmelCase__ = [x for x in logs if x[2] is not None]
lowerCAmelCase__ = {x[2] for x in logs}
lowerCAmelCase__ = {}
for test in tests:
lowerCAmelCase__ = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
lowerCAmelCase__ = counter.most_common()
lowerCAmelCase__ = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
lowerCAmelCase__ = sum(error_counts.values() )
if n_errors > 0:
lowerCAmelCase__ = {"""count""": n_errors, """errors""": error_counts}
lowerCAmelCase__ = dict(sorted(r.items() , key=lambda UpperCamelCase_ : item[1]["count"] , reverse=A_ ) )
return r
def _a ( UpperCamelCase_ : str ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ = """| no. | error | status |"""
lowerCAmelCase__ = """|-:|:-|:-|"""
lowerCAmelCase__ = [header, sep]
for error in reduced_by_error:
lowerCAmelCase__ = reduced_by_error[error]["""count"""]
lowerCAmelCase__ = F"| {count} | {error[:100]} | |"
lines.append(A_ )
return "\n".join(A_ )
def _a ( UpperCamelCase_ : List[str] ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ = """| model | no. of errors | major error | count |"""
lowerCAmelCase__ = """|-:|-:|-:|-:|"""
lowerCAmelCase__ = [header, sep]
for model in reduced_by_model:
lowerCAmelCase__ = reduced_by_model[model]["""count"""]
lowerCAmelCase__ = list(reduced_by_model[model]["errors"].items() )[0]
lowerCAmelCase__ = F"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(A_ )
return "\n".join(A_ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
a_ = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
a_ = get_job_links(args.workflow_run_id, token=args.token)
a_ = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
a_ = k.find(''' / ''')
a_ = k[index + len(''' / ''') :]
a_ = v
with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
a_ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
a_ = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
a_ = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
a_ = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
a_ = reduce_by_error(errors)
a_ = reduce_by_model(errors)
a_ = make_github_table(reduced_by_error)
a_ = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
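# Added usage note (illustrative): the script is typically invoked as, e.g.
#   python get_ci_error_statistics.py --workflow_run_id <run_id> \
#       --output_dir ci_errors --token <token with actions:read permission>
# after which errors.json plus the reduced_by_error.txt / reduced_by_model.txt tables
# are written to the chosen output directory.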
| 339
|
'''simple docstring'''
def _snake_case ( text_a : str , text_b : str ):
    """simple docstring"""
    if not (isinstance(text_a , str ) and isinstance(text_b , str )):
        raise ValueError("""longest_common_substring() takes two strings for inputs""" )
    text_a_length : int = len(text_a )
    text_b_length : int = len(text_b )
    dp : list[list[int]] = [[0] * (text_b_length + 1) for _ in range(text_a_length + 1 )]
    ans_index : int = 0
    ans_length : int = 0
    for i in range(1 , text_a_length + 1 ):
        for j in range(1 , text_b_length + 1 ):
            if text_a[i - 1] == text_b[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text_a[ans_index - ans_length : ans_index]
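# Added example (illustrative): for inputs "abcdxyz" and "xyzabcd" the longest common
# substring is "abcd", so the function above is expected to return "abcd".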
if __name__ == "__main__":
import doctest
doctest.testmod()
| 577
| 0
|
from collections import deque
from .hash_table import HashTable
class UpperCamelCase ( HashTable ):
"""simple docstring"""
def __init__( self : Any ,*_SCREAMING_SNAKE_CASE : Optional[int] ,**_SCREAMING_SNAKE_CASE : Optional[Any] ) -> int:
'''simple docstring'''
super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def A( self : Optional[int] ,_SCREAMING_SNAKE_CASE : Optional[Any] ,_SCREAMING_SNAKE_CASE : Tuple ) -> List[str]:
'''simple docstring'''
A = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(_SCREAMING_SNAKE_CASE )
A = self.values[key]
def A( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return (
sum(self.charge_factor - len(_SCREAMING_SNAKE_CASE ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def A( self : Dict ,_SCREAMING_SNAKE_CASE : Optional[int] ,_SCREAMING_SNAKE_CASE : str=None ) -> Union[str, Any]:
'''simple docstring'''
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(_SCREAMING_SNAKE_CASE ) == 0
):
return key
return super()._collision_resolution(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
| 110
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class UpperCamelCase ( Pipeline ):
"""simple docstring"""
def __init__( self : str ,*_SCREAMING_SNAKE_CASE : List[Any] ,**_SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
'''simple docstring'''
super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
requires_backends(self ,'vision' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def A( self : Union[str, Any] ,_SCREAMING_SNAKE_CASE : List[Any]=None ,_SCREAMING_SNAKE_CASE : List[str]=None ,_SCREAMING_SNAKE_CASE : int=None ) -> Any:
'''simple docstring'''
A = {}
A = {}
if prompt is not None:
A = prompt
if generate_kwargs is not None:
A = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
A = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
' please use only one' )
A = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : int ,_SCREAMING_SNAKE_CASE : Union[str, List[str], "Image.Image", List["Image.Image"]] ,**_SCREAMING_SNAKE_CASE : List[str] ) -> Union[str, Any]:
'''simple docstring'''
return super().__call__(_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def A( self : Dict ,_SCREAMING_SNAKE_CASE : List[Any] ,_SCREAMING_SNAKE_CASE : Optional[Any]=None ) -> int:
'''simple docstring'''
A = load_image(_SCREAMING_SNAKE_CASE )
if prompt is not None:
if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
raise ValueError(
f'Received an invalid text input, got - {type(_SCREAMING_SNAKE_CASE )} - but expected a single string. '
'Note also that one single text can be provided for conditional image to text generation.' )
A = self.model.config.model_type
if model_type == "git":
A = self.image_processor(images=_SCREAMING_SNAKE_CASE ,return_tensors=self.framework )
A = self.tokenizer(text=_SCREAMING_SNAKE_CASE ,add_special_tokens=_SCREAMING_SNAKE_CASE ).input_ids
A = [self.tokenizer.cls_token_id] + input_ids
A = torch.tensor(_SCREAMING_SNAKE_CASE ).unsqueeze(0 )
model_inputs.update({'input_ids': input_ids} )
elif model_type == "pix2struct":
A = self.image_processor(images=_SCREAMING_SNAKE_CASE ,header_text=_SCREAMING_SNAKE_CASE ,return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
A = self.image_processor(images=_SCREAMING_SNAKE_CASE ,return_tensors=self.framework )
A = self.tokenizer(_SCREAMING_SNAKE_CASE ,return_tensors=self.framework )
model_inputs.update(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(f'Model type {model_type} does not support conditional text generation' )
else:
A = self.image_processor(images=_SCREAMING_SNAKE_CASE ,return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
A = None
return model_inputs
def A( self : str ,_SCREAMING_SNAKE_CASE : str ,_SCREAMING_SNAKE_CASE : Dict=None ) -> List[str]:
'''simple docstring'''
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs['input_ids'] ,_SCREAMING_SNAKE_CASE )
and all(x is None for x in model_inputs['input_ids'] )
):
A = None
if generate_kwargs is None:
A = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
A = model_inputs.pop(self.model.main_input_name )
A = self.model.generate(_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
return model_outputs
def A( self : Dict ,_SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]:
'''simple docstring'''
A = []
for output_ids in model_outputs:
A = {
'generated_text': self.tokenizer.decode(
_SCREAMING_SNAKE_CASE ,skip_special_tokens=_SCREAMING_SNAKE_CASE ,)
}
records.append(_SCREAMING_SNAKE_CASE )
return records
| 110
| 1
|
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ):
__a : Any = ''
for i in table:
res += inp[i - 1]
return res
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[Any] ):
return data[1:] + data[0]
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int] ):
__a : Optional[int] = ''
for i in range(len(lowerCamelCase_ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def UpperCAmelCase__ ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : str ):
__a : List[str] = int('0b' + data[0] + data[-1] , 2 )
__a : List[str] = int('0b' + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def UpperCAmelCase__ ( lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any] ):
__a : List[Any] = message[:4]
__a : str = message[4:]
__a : Any = apply_table(lowerCamelCase_ , lowerCamelCase_ )
__a : int = xor(lowerCamelCase_ , lowerCamelCase_ )
__a : Dict = apply_sbox(lowerCamelCase_ , temp[:4] ) # noqa: E741
__a : Tuple = apply_sbox(lowerCamelCase_ , temp[4:] )
__a : List[Any] = '0' * (2 - len(lowerCamelCase_ )) + l # noqa: E741
__a : List[str] = '0' * (2 - len(lowerCamelCase_ )) + r
__a : List[Any] = apply_table(l + r , lowerCamelCase_ )
__a : Dict = xor(lowerCamelCase_ , lowerCamelCase_ )
return temp + right
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = input('''Enter 10 bit key: ''')
SCREAMING_SNAKE_CASE__ = input('''Enter 8 bit message: ''')
SCREAMING_SNAKE_CASE__ = [6, 3, 7, 4, 8, 5, 10, 9]
SCREAMING_SNAKE_CASE__ = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
SCREAMING_SNAKE_CASE__ = [2, 4, 3, 1]
SCREAMING_SNAKE_CASE__ = [2, 6, 3, 1, 4, 8, 5, 7]
SCREAMING_SNAKE_CASE__ = [4, 1, 3, 5, 7, 2, 8, 6]
SCREAMING_SNAKE_CASE__ = [4, 1, 2, 3, 2, 3, 4, 1]
SCREAMING_SNAKE_CASE__ = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
SCREAMING_SNAKE_CASE__ = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
SCREAMING_SNAKE_CASE__ = apply_table(key, paa_table)
SCREAMING_SNAKE_CASE__ = temp[:5]
SCREAMING_SNAKE_CASE__ = temp[5:]
SCREAMING_SNAKE_CASE__ = left_shift(left)
SCREAMING_SNAKE_CASE__ = left_shift(right)
SCREAMING_SNAKE_CASE__ = apply_table(left + right, pa_table)
SCREAMING_SNAKE_CASE__ = left_shift(left)
SCREAMING_SNAKE_CASE__ = left_shift(right)
SCREAMING_SNAKE_CASE__ = left_shift(left)
SCREAMING_SNAKE_CASE__ = left_shift(right)
SCREAMING_SNAKE_CASE__ = apply_table(left + right, pa_table)
# encryption
SCREAMING_SNAKE_CASE__ = apply_table(message, IP)
SCREAMING_SNAKE_CASE__ = function(expansion, sa, sa, keya, temp)
SCREAMING_SNAKE_CASE__ = temp[4:] + temp[:4]
SCREAMING_SNAKE_CASE__ = function(expansion, sa, sa, keya, temp)
SCREAMING_SNAKE_CASE__ = apply_table(temp, IP_inv)
print('''Cipher text is:''', CT)
# decryption
SCREAMING_SNAKE_CASE__ = apply_table(CT, IP)
SCREAMING_SNAKE_CASE__ = function(expansion, sa, sa, keya, temp)
SCREAMING_SNAKE_CASE__ = temp[4:] + temp[:4]
SCREAMING_SNAKE_CASE__ = function(expansion, sa, sa, keya, temp)
SCREAMING_SNAKE_CASE__ = apply_table(temp, IP_inv)
print('''Plain text after decrypting is:''', PT)
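# Added note (illustrative): for a non-interactive run the two input() calls above can be
# replaced with fixed test vectors, e.g. a 10-bit key such as "1010000010" and an 8-bit
# message such as "11010111"; decrypting the produced cipher text should then return
# the original message.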
| 47
|
from string import ascii_lowercase, ascii_uppercase
def UpperCAmelCase__ ( lowerCamelCase_ : str ):
if not sentence:
return ""
    __a : Union[str, Any] = dict(zip(ascii_lowercase , ascii_uppercase ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 47
| 1
|
"""simple docstring"""
from collections.abc import Iterable
from typing import Generic, TypeVar
lowerCAmelCase_ = TypeVar('''_T''')
class _snake_case ( Generic[_T] ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _A : Iterable[_T] | None = None):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : list[_T] = list(iterable or [])
_SCREAMING_SNAKE_CASE : list[_T] = []
def __len__( self : List[Any]):
"""simple docstring"""
return len(self._stacka) + len(self._stacka)
def __repr__( self : Optional[Any]):
"""simple docstring"""
return f"""Queue({tuple(self._stacka[::-1] + self._stacka)})"""
def _lowerCAmelCase ( self : Dict , _A : _T):
"""simple docstring"""
self._stacka.append(_A)
def _lowerCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = self._stacka.pop
_SCREAMING_SNAKE_CASE : List[str] = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop())
if not self._stacka:
raise IndexError("""Queue is empty""")
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 706
|
"""simple docstring"""
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int:
    if not isinstance(__SCREAMING_SNAKE_CASE , int ):
        raise TypeError("""only integers accepted as input""" )
    else:
        num_str : str = str(abs(__SCREAMING_SNAKE_CASE ) )
        num_transpositions : list[list[str]] = [list(num_str ) for char in range(len(num_str ) )]
        for index in range(len(num_str ) ):
            num_transpositions[index].pop(index )
        return max(
            int("""""".join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 635
| 0
|
'''simple docstring'''
import math
import sys
def _lowerCAmelCase ( __snake_case : int ) -> int:
if number != int(__snake_case ):
raise ValueError('the value of input must be a natural number' )
if number < 0:
raise ValueError('the value of input must not be a negative number' )
if number == 0:
return 1
__A : str = [-1] * (number + 1)
__A : Dict = 0
for i in range(1 , number + 1 ):
__A : int = sys.maxsize
__A : int = int(math.sqrt(__snake_case ) )
for j in range(1 , root + 1 ):
__A : str = 1 + answers[i - (j**2)]
__A : Dict = min(__snake_case , __snake_case )
__A : Union[str, Any] = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
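# Added note (illustrative): the function returns the minimum number of perfect squares
# summing to the input, e.g. 12 -> 3 (4 + 4 + 4) and 13 -> 2 (4 + 9).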
| 8
|
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCamelCase = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class __lowercase (TokenizerTesterMixin , unittest.TestCase ):
    _UpperCamelCase = GPTSw3Tokenizer
_UpperCamelCase = False
_UpperCamelCase = True
_UpperCamelCase = False
def UpperCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        __lowerCAmelCase : Dict = GPTSw3Tokenizer(A_ , eos_token='''<unk>''' , bos_token='''<unk>''' , pad_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self , A_ ) ->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : Tuple = '''This is a test'''
__lowerCAmelCase : Dict = '''This is a test'''
return input_text, output_text
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = '''<s>'''
__lowerCAmelCase : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )
def UpperCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(A_ ) , 2000 )
def UpperCamelCase__ ( self ) ->Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 2000 )
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
        __lowerCAmelCase : Dict = GPTSw3Tokenizer(A_ )
__lowerCAmelCase : Union[str, Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(A_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [465, 287, 265, 631, 842] )
__lowerCAmelCase : int = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
# fmt: off
self.assertListEqual(
A_ , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , )
# fmt: on
__lowerCAmelCase : List[Any] = tokenizer.convert_tokens_to_ids(A_ )
self.assertListEqual(
A_ , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
__lowerCAmelCase : int = tokenizer.convert_ids_to_tokens(A_ )
# fmt: off
self.assertListEqual(
A_ , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] )
# fmt: on
def UpperCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
        __lowerCAmelCase : str = GPTSw3Tokenizer(A_ )
__lowerCAmelCase : str = ['''This is a test''', '''I was born in 92000, and this is falsé.''']
__lowerCAmelCase : List[Any] = [
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(A_ , A_ ):
self.assertListEqual(tokenizer.encode_fast(A_ ) , A_ )
# Test that decode_fast returns the input text
for text, token_ids in zip(A_ , A_ ):
self.assertEqual(tokenizer.decode_fast(A_ ) , A_ )
@slow
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
__lowerCAmelCase : Tuple = [
'''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
'''Hey there, how are you doing this fine day?''',
'''This is a text with a trailing spaces followed by a dot .''',
'''Häj sväjs lillebrör! =)''',
'''Det är inget fel på Mr. Cool''',
]
# fmt: off
__lowerCAmelCase : Optional[int] = {'''input_ids''': [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=A_ , )
| 492
| 0
|
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __UpperCamelCase ( A , A , A , A , A ):
# Load configuration defined in the metadata file
with open(A ) as metadata_file:
UpperCamelCase__ = json.load(A )
UpperCamelCase__ = LukeConfig(use_entity_aware_attention=A , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
UpperCamelCase__ = torch.load(A , map_location='''cpu''' )['''module''']
# Load the entity vocab file
UpperCamelCase__ = load_original_entity_vocab(A )
# add an entry for [MASK2]
UpperCamelCase__ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
UpperCamelCase__ = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken('''<ent>''' , lstrip=A , rstrip=A )
    entity_token_2 = AddedToken('''<ent2>''' , lstrip=A , rstrip=A )
    tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_1, entity_token_2]} )
config.vocab_size += 2
print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(A )
with open(os.path.join(A , '''tokenizer_config.json''' ) , '''r''' ) as f:
UpperCamelCase__ = json.load(A )
UpperCamelCase__ = '''MLukeTokenizer'''
with open(os.path.join(A , '''tokenizer_config.json''' ) , '''w''' ) as f:
json.dump(A , A )
with open(os.path.join(A , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(A , A )
UpperCamelCase__ = MLukeTokenizer.from_pretrained(A )
# Initialize the embeddings of the special tokens
UpperCamelCase__ = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
UpperCamelCase__ = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
UpperCamelCase__ = state_dict['''embeddings.word_embeddings.weight''']
UpperCamelCase__ = word_emb[ent_init_index].unsqueeze(0 )
UpperCamelCase__ = word_emb[enta_init_index].unsqueeze(0 )
UpperCamelCase__ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
UpperCamelCase__ = state_dict[bias_name]
UpperCamelCase__ = decoder_bias[ent_init_index].unsqueeze(0 )
UpperCamelCase__ = decoder_bias[enta_init_index].unsqueeze(0 )
UpperCamelCase__ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
UpperCamelCase__ = f"encoder.layer.{layer_index}.attention.self."
UpperCamelCase__ = state_dict[prefix + matrix_name]
UpperCamelCase__ = state_dict[prefix + matrix_name]
UpperCamelCase__ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
UpperCamelCase__ = state_dict['''entity_embeddings.entity_embeddings.weight''']
UpperCamelCase__ = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
UpperCamelCase__ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
UpperCamelCase__ = state_dict['''entity_predictions.bias''']
UpperCamelCase__ = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
UpperCamelCase__ = torch.cat([entity_prediction_bias, entity_mask_bias] )
UpperCamelCase__ = LukeForMaskedLM(config=A ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
UpperCamelCase__ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
UpperCamelCase__ = state_dict[key]
else:
UpperCamelCase__ = state_dict[key]
UpperCamelCase__ , UpperCamelCase__ = model.load_state_dict(A , strict=A )
if set(A ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}" )
if set(A ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
UpperCamelCase__ = MLukeTokenizer.from_pretrained(A , task='''entity_classification''' )
UpperCamelCase__ = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
UpperCamelCase__ = (0, 9)
UpperCamelCase__ = tokenizer(A , entity_spans=[span] , return_tensors='''pt''' )
UpperCamelCase__ = model(**A )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
UpperCamelCase__ = torch.Size((1, 33, 768) )
UpperCamelCase__ = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , A , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
UpperCamelCase__ = torch.Size((1, 1, 768) )
UpperCamelCase__ = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
f" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , A , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
UpperCamelCase__ = MLukeTokenizer.from_pretrained(A )
UpperCamelCase__ = '''Tokyo is the capital of <mask>.'''
UpperCamelCase__ = (24, 30)
UpperCamelCase__ = tokenizer(A , entity_spans=[span] , return_tensors='''pt''' )
UpperCamelCase__ = model(**A )
UpperCamelCase__ = encoding['''input_ids'''][0].tolist()
UpperCamelCase__ = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
UpperCamelCase__ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(A )
UpperCamelCase__ = outputs.entity_logits[0][0].argmax().item()
UpperCamelCase__ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(A ) )
model.save_pretrained(A )
def __UpperCamelCase ( A ):
UpperCamelCase__ = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
    UpperCamelCase__ = [json.loads(line ) for line in open(A )]
UpperCamelCase__ = {}
for entry in data:
UpperCamelCase__ = entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
UpperCamelCase__ = entity_id
break
UpperCamelCase__ = f"{language}:{entity_name}"
UpperCamelCase__ = entity_id
return new_mapping
if __name__ == "__main__":
__magic_name__ =argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
__magic_name__ =parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
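# A minimal invocation sketch for this converter (the script and file names below are
# hypothetical placeholders; the flags are the argparse options defined above):
#
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path ./mluke/pytorch_model.bin \
#       --metadata_path ./mluke/metadata.json \
#       --entity_vocab_path ./mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-converted \
#       --model_size base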
| 469
|
from __future__ import annotations
def __UpperCamelCase ( A , A ):
UpperCamelCase__ = get_failure_array(A )
# 2) Step through text searching for pattern
UpperCamelCase__ , UpperCamelCase__ = 0, 0 # index into text, pattern
while i < len(A ):
if pattern[j] == text[i]:
if j == (len(A ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
UpperCamelCase__ = failure[j - 1]
continue
i += 1
return False
def __UpperCamelCase ( A ):
UpperCamelCase__ = [0]
UpperCamelCase__ = 0
UpperCamelCase__ = 1
while j < len(A ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
UpperCamelCase__ = failure[i - 1]
continue
j += 1
failure.append(A )
return failure
if __name__ == "__main__":
# Test 1)
__magic_name__ ='''abc1abc12'''
__magic_name__ ='''alskfjaldsabc1abc1abc12k23adsfabcabc'''
__magic_name__ ='''alskfjaldsk23adsfabcabc'''
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
__magic_name__ ='''ABABX'''
__magic_name__ ='''ABABZABABYABABX'''
assert kmp(pattern, text)
# Test 3)
__magic_name__ ='''AAAB'''
__magic_name__ ='''ABAAAAAB'''
assert kmp(pattern, text)
# Test 4)
__magic_name__ ='''abcdabcy'''
__magic_name__ ='''abcxabcdabxabcdabcdabcy'''
assert kmp(pattern, text)
# Test 5)
__magic_name__ ='''aabaabaaa'''
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
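    # A self-contained sketch (not part of the original snippet) of the same
    # prefix-function idea used by get_failure_array above, checked on the "ABABX"
    # pattern from Test 2:
    def _prefix_function_sketch(pattern: str) -> list[int]:
        # failure[k] = length of the longest proper prefix of pattern[: k + 1]
        # that is also a suffix of it
        failure = [0] * len(pattern)
        k = 0
        for j in range(1, len(pattern)):
            while k > 0 and pattern[k] != pattern[j]:
                k = failure[k - 1]
            if pattern[k] == pattern[j]:
                k += 1
            failure[j] = k
        return failure

    assert _prefix_function_sketch("ABABX") == [0, 0, 1, 2, 0]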
| 469
| 1
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
a_ :Tuple = logging.get_logger(__name__)
a_ :Optional[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a_ :Optional[int] = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Dict = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Any = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Optional[int] = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_12,
'facebook/dpr-ctx_encoder-multiset-base': 5_12,
}
a_ :List[Any] = {
'facebook/dpr-question_encoder-single-nq-base': 5_12,
'facebook/dpr-question_encoder-multiset-base': 5_12,
}
a_ :Tuple = {
'facebook/dpr-reader-single-nq-base': 5_12,
'facebook/dpr-reader-multiset-base': 5_12,
}
a_ :str = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
a_ :Optional[int] = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
a_ :Any = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[Any] = VOCAB_FILES_NAMES
lowerCamelCase : int = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : int = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Optional[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[int] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
a_ :List[str] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
a_ :Optional[int] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
a_ :Tuple = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(_UpperCAmelCase )
class lowercase :
def __call__( self : List[Any] , _lowercase : Any , _lowercase : Optional[str] = None , _lowercase : Optional[str] = None , _lowercase : Union[bool, str] = False , _lowercase : Union[bool, str] = False , _lowercase : Optional[int] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Optional[bool] = None , **_lowercase : str , ):
if titles is None and texts is None:
return super().__call__(
_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
elif titles is None or texts is None:
SCREAMING_SNAKE_CASE__ : List[str] = titles if texts is None else texts
return super().__call__(
_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = titles if not isinstance(_lowercase , _lowercase ) else [titles]
SCREAMING_SNAKE_CASE__ : Optional[int] = texts if not isinstance(_lowercase , _lowercase ) else [texts]
SCREAMING_SNAKE_CASE__ : List[Any] = len(_lowercase )
SCREAMING_SNAKE_CASE__ : str = questions if not isinstance(_lowercase , _lowercase ) else [questions] * n_passages
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
                f"""There should be as many titles as texts but got {len(_lowercase )} titles and {len(_lowercase )} texts.""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = super().__call__(_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase )['''input_ids''']
SCREAMING_SNAKE_CASE__ : Tuple = super().__call__(_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase )['''input_ids''']
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_lowercase , _lowercase )
]
}
if return_attention_mask is not False:
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
SCREAMING_SNAKE_CASE__ : Dict = attention_mask
return self.pad(_lowercase , padding=_lowercase , max_length=_lowercase , return_tensors=_lowercase )
def lowercase__ ( self : List[Any] , _lowercase : BatchEncoding , _lowercase : DPRReaderOutput , _lowercase : int = 16 , _lowercase : int = 64 , _lowercase : int = 4 , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = reader_input['''input_ids''']
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = reader_output[:3]
SCREAMING_SNAKE_CASE__ : Any = len(_lowercase )
SCREAMING_SNAKE_CASE__ : int = sorted(range(_lowercase ) , reverse=_lowercase , key=relevance_logits.__getitem__ )
SCREAMING_SNAKE_CASE__ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
SCREAMING_SNAKE_CASE__ : Optional[int] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
SCREAMING_SNAKE_CASE__ : Any = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
SCREAMING_SNAKE_CASE__ : Dict = sequence_ids.index(self.pad_token_id )
else:
SCREAMING_SNAKE_CASE__ : List[str] = len(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_lowercase , top_spans=_lowercase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_lowercase , start_index=_lowercase , end_index=_lowercase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_lowercase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowercase__ ( self : Dict , _lowercase : List[int] , _lowercase : List[int] , _lowercase : int , _lowercase : int , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for start_index, start_score in enumerate(_lowercase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        SCREAMING_SNAKE_CASE__ : Optional[int] = sorted(_lowercase , key=lambda x : x[1] , reverse=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""" )
SCREAMING_SNAKE_CASE__ : Tuple = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_lowercase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_UpperCAmelCase )
class lowercase ( _UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase : Dict = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : str = READER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : List[Any] = ['''input_ids''', '''attention_mask''']
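# A minimal usage sketch for the reader tokenizer defined above (the class names in this
# file are obfuscated, but the call mirrors DPRReaderTokenizer.__call__; the question,
# titles and texts are made-up examples):
#
#   tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoding = tokenizer(
#       questions="What is the capital of France?",
#       titles=["Paris", "France"],
#       texts=["Paris is the capital of France.", "France is a country in Europe."],
#       padding=True,
#       return_tensors="pt",
#   )
#   # encoding["input_ids"] has shape (n_passages, sequence_length)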
| 35
|
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__a : Dict = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class UpperCAmelCase( snake_case_ ):
"""simple docstring"""
a : bool = field(default=snake_case_ , metadata={"""help""": """Whether to use SortishSampler or not."""} )
a : bool = field(
default=snake_case_ , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
a : Optional[int] = field(
default=snake_case_ , metadata={
"""help""": (
"""The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `max_length` value of the model configuration."""
)
} , )
a : Optional[int] = field(
default=snake_case_ , metadata={
"""help""": (
"""The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `num_beams` value of the model configuration."""
)
} , )
a : Optional[Union[str, Path, GenerationConfig]] = field(
default=snake_case_ , metadata={
"""help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."""
} , )
def __a ( self ) -> int:
"""simple docstring"""
lowercase__ : List[str] = super().to_dict()
for k, v in d.items():
if isinstance(lowerCamelCase , lowerCamelCase ):
lowercase__ : Union[str, Any] = v.to_dict()
return d
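# A minimal usage sketch (the dataclass above corresponds to Seq2SeqTrainingArguments;
# the output directory is a hypothetical placeholder):
#
#   args = Seq2SeqTrainingArguments(
#       output_dir="./seq2seq-run",
#       predict_with_generate=True,
#       generation_max_length=128,
#       generation_num_beams=4,
#   )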
| 397
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
snake_case_ = {
"""configuration_speecht5""": [
"""SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP""",
"""SpeechT5Config""",
"""SpeechT5HifiGanConfig""",
],
"""feature_extraction_speecht5""": ["""SpeechT5FeatureExtractor"""],
"""processing_speecht5""": ["""SpeechT5Processor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ["""SpeechT5Tokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
"""SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SpeechT5ForSpeechToText""",
"""SpeechT5ForSpeechToSpeech""",
"""SpeechT5ForTextToSpeech""",
"""SpeechT5Model""",
"""SpeechT5PreTrainedModel""",
"""SpeechT5HifiGan""",
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
snake_case_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
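# With the _LazyModule above, `from transformers import SpeechT5ForTextToSpeech` only
# loads the torch-backed submodule when the attribute is first accessed. A short usage
# sketch (the checkpoint name is an illustrative example, not taken from this file):
#
#   from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")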
| 537
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _lowercase ( metaclass=a ):
_UpperCamelCase = ["""onnx"""]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''onnx'''] )
@classmethod
def snake_case ( cls , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(cls , ['''onnx'''] )
@classmethod
def snake_case ( cls , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(cls , ['''onnx'''] )
| 537
| 1
|
import fire
from utils import calculate_rouge, save_json
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , **__UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = [x.strip() for x in open(__UpperCamelCase ).readlines()]
SCREAMING_SNAKE_CASE_ = [x.strip() for x in open(__UpperCamelCase ).readlines()][: len(__UpperCamelCase )]
SCREAMING_SNAKE_CASE_ = calculate_rouge(__UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )
if save_path is not None:
save_json(__UpperCamelCase , __UpperCamelCase , indent=__UpperCamelCase )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
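# A minimal invocation sketch (file names are hypothetical placeholders; fire.Fire
# exposes the function above, so the predictions and references files are positional
# and --save_path is optional):
#
#   python rouge_cli.py predictions.txt references.txt --save_path rouge_metrics.json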
| 140
|
from numpy import exp, pi, sqrt
def a__ ( __UpperCamelCase , __UpperCamelCase = 0.0 , __UpperCamelCase = 1.0 ):
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
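    # A quick worked check of the formula above (a standalone sketch, not part of the
    # original snippet): for mu = 0.0 and sigma = 1.0 the density peaks at x = 0 with
    # value 1 / sqrt(2 * pi), about 0.3989.
    _peak = 1 / sqrt(2 * pi * 1.0**2) * exp(-((0.0 - 0.0) ** 2) / (2 * 1.0**2))
    assert abs(_peak - 0.3989422804014327) < 1e-12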
| 140
| 1
|
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
UpperCAmelCase : Any = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
UpperCAmelCase : Union[str, Any] = (
subprocess.check_output(F"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
UpperCAmelCase : int = "|".join(sys.argv[1:])
UpperCAmelCase : Optional[int] = re.compile(rF"^({joined_dirs}).*?\.py$")
UpperCAmelCase : Any = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
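# Illustrative check (a sketch, not part of the original script): with top-level dirs
# "utils" and "src" the compiled pattern becomes r"^(utils|src).*?\.py$", so only .py
# files under those directories are kept.
_example_regex = re.compile(r"^(utils|src).*?\.py$")
assert _example_regex.match("src/transformers/modeling_bert.py")
assert not _example_regex.match("docs/index.md")
assert not _example_regex.match("src/transformers/README.md")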
| 100
|
"""simple docstring"""
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
UpperCAmelCase : Union[str, Any] = datasets.utils.logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( folder_based_builder.FolderBasedBuilderConfig ):
lowercase__ = None
lowercase__ = None
class SCREAMING_SNAKE_CASE__ ( folder_based_builder.FolderBasedBuilder ):
lowercase__ = datasets.Audio()
lowercase__ = "audio"
lowercase__ = AudioFolderConfig
lowercase__ = 42 # definition at the bottom of the script
lowercase__ = AudioClassification(audio_column="audio" , label_column="label" )
UpperCAmelCase : Tuple = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
UpperCAmelCase : Any = AUDIO_EXTENSIONS
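# A minimal usage sketch: the builder above backs datasets' packaged "audiofolder"
# loader (the local directory path is a hypothetical placeholder):
#
#   from datasets import load_dataset
#   ds = load_dataset("audiofolder", data_dir="./my_audio_dataset")
#   # each example has an "audio" column; labels are inferred from sub-folder names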
| 100
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : str = logging.get_logger(__name__)
A_ : Union[str, Any] = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class _lowerCAmelCase( UpperCAmelCase_ ):
"""simple docstring"""
a : Tuple ='''open-llama'''
def __init__( self , _lowerCamelCase=1_0_0_0_0_0 , _lowerCamelCase=4_0_9_6 , _lowerCamelCase=1_1_0_0_8 , _lowerCamelCase=3_2 , _lowerCamelCase=3_2 , _lowerCamelCase="silu" , _lowerCamelCase=2_0_4_8 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1e-6 , _lowerCamelCase=True , _lowerCamelCase=0 , _lowerCamelCase=1 , _lowerCamelCase=2 , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=None , **_lowerCamelCase , ):
UpperCamelCase_: int = vocab_size
UpperCamelCase_: List[Any] = max_position_embeddings
UpperCamelCase_: Dict = hidden_size
UpperCamelCase_: Dict = intermediate_size
UpperCamelCase_: Union[str, Any] = num_hidden_layers
UpperCamelCase_: Dict = num_attention_heads
UpperCamelCase_: Union[str, Any] = hidden_act
UpperCamelCase_: Union[str, Any] = initializer_range
UpperCamelCase_: List[Any] = rms_norm_eps
UpperCamelCase_: Union[str, Any] = use_cache
UpperCamelCase_: Dict = kwargs.pop(
'use_memorry_efficient_attention' , _lowerCamelCase )
UpperCamelCase_: Union[str, Any] = hidden_dropout_prob
UpperCamelCase_: Any = attention_dropout_prob
UpperCamelCase_: int = use_stable_embedding
UpperCamelCase_: Tuple = shared_input_output_embedding
UpperCamelCase_: str = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , tie_word_embeddings=_lowerCamelCase , **_lowerCamelCase , )
def _a ( self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _lowerCamelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
f'''got {self.rope_scaling}''' )
UpperCamelCase_: str = self.rope_scaling.get('type' , _lowerCamelCase )
UpperCamelCase_: int = self.rope_scaling.get('factor' , _lowerCamelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(_lowerCamelCase , _lowerCamelCase ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
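# A standalone sketch of what the `rope_scaling` validation above accepts and rejects
# (the helper below is illustrative only and mirrors the checks in the method above;
# it is not part of transformers):
def _check_rope_scaling_sketch(rope_scaling):
    if rope_scaling is None:
        return
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError("`rope_scaling` must be a dict with exactly the fields `type` and `factor`")
    scaling_type = rope_scaling.get("type")
    scaling_factor = rope_scaling.get("factor")
    if scaling_type not in ("linear", "dynamic"):
        raise ValueError("`rope_scaling` type must be 'linear' or 'dynamic'")
    if not isinstance(scaling_factor, float) or scaling_factor <= 1.0:
        raise ValueError("`rope_scaling` factor must be a float > 1")

_check_rope_scaling_sketch({"type": "linear", "factor": 2.0})  # accepted
_check_rope_scaling_sketch(None)  # accepted: rope scaling disabled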
| 57
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class __lowercase (_UpperCAmelCase ):
_UpperCamelCase = 42
_UpperCamelCase = 42
class __lowercase (nn.Module ):
_UpperCamelCase = 42
_UpperCamelCase = (16, 32, 96, 256)
_UpperCamelCase = jnp.floataa
def UpperCamelCase__ ( self ) ->str:
'''simple docstring'''
__lowerCAmelCase : int = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__lowerCAmelCase : List[str] = []
for i in range(len(self.block_out_channels ) - 1 ):
__lowerCAmelCase : List[Any] = self.block_out_channels[i]
__lowerCAmelCase : Dict = self.block_out_channels[i + 1]
__lowerCAmelCase : Any = nn.Conv(
A_ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(A_ )
__lowerCAmelCase : Any = nn.Conv(
A_ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(A_ )
__lowerCAmelCase : List[str] = blocks
__lowerCAmelCase : str = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , A_ ) ->Dict:
'''simple docstring'''
__lowerCAmelCase : Any = self.conv_in(A_ )
__lowerCAmelCase : int = nn.silu(A_ )
for block in self.blocks:
__lowerCAmelCase : int = block(A_ )
__lowerCAmelCase : Union[str, Any] = nn.silu(A_ )
__lowerCAmelCase : List[Any] = self.conv_out(A_ )
return embedding
@flax_register_to_config
class __lowercase (nn.Module , _UpperCAmelCase , _UpperCAmelCase ):
_UpperCamelCase = 32
_UpperCamelCase = 4
_UpperCamelCase = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_UpperCamelCase = False
_UpperCamelCase = (320, 640, 1280, 1280)
_UpperCamelCase = 2
_UpperCamelCase = 8
_UpperCamelCase = None
_UpperCamelCase = 1280
_UpperCamelCase = 0.0
_UpperCamelCase = False
_UpperCamelCase = jnp.floataa
_UpperCamelCase = True
_UpperCamelCase = 0
_UpperCamelCase = "rgb"
_UpperCamelCase = (16, 32, 96, 256)
def UpperCamelCase__ ( self , A_ ) ->FrozenDict:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = (1, self.in_channels, self.sample_size, self.sample_size)
__lowerCAmelCase : str = jnp.zeros(A_ , dtype=jnp.floataa )
__lowerCAmelCase : Tuple = jnp.ones((1,) , dtype=jnp.intaa )
__lowerCAmelCase : Optional[Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
__lowerCAmelCase : Any = (1, 3, self.sample_size * 8, self.sample_size * 8)
__lowerCAmelCase : Optional[Any] = jnp.zeros(A_ , dtype=jnp.floataa )
__lowerCAmelCase, __lowerCAmelCase : Any = jax.random.split(A_ )
__lowerCAmelCase : int = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(A_ , A_ , A_ , A_ , A_ )["params"]
def UpperCamelCase__ ( self ) ->Dict:
'''simple docstring'''
__lowerCAmelCase : List[Any] = self.block_out_channels
__lowerCAmelCase : Optional[Any] = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__lowerCAmelCase : Optional[int] = self.num_attention_heads or self.attention_head_dim
# input
__lowerCAmelCase : List[Any] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__lowerCAmelCase : List[str] = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
__lowerCAmelCase : str = FlaxTimestepEmbedding(A_ , dtype=self.dtype )
__lowerCAmelCase : Optional[int] = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
__lowerCAmelCase : Union[str, Any] = self.only_cross_attention
if isinstance(A_ , A_ ):
__lowerCAmelCase : Union[str, Any] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(A_ , A_ ):
__lowerCAmelCase : str = (num_attention_heads,) * len(self.down_block_types )
# down
__lowerCAmelCase : Optional[int] = []
__lowerCAmelCase : Union[str, Any] = []
__lowerCAmelCase : Optional[Any] = block_out_channels[0]
__lowerCAmelCase : List[str] = nn.Conv(
A_ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(A_ )
for i, down_block_type in enumerate(self.down_block_types ):
__lowerCAmelCase : List[Any] = output_channel
__lowerCAmelCase : Optional[int] = block_out_channels[i]
__lowerCAmelCase : str = i == len(A_ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__lowerCAmelCase : List[str] = FlaxCrossAttnDownBlockaD(
in_channels=A_ , out_channels=A_ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
__lowerCAmelCase : List[Any] = FlaxDownBlockaD(
in_channels=A_ , out_channels=A_ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(A_ )
for _ in range(self.layers_per_block ):
__lowerCAmelCase : str = nn.Conv(
A_ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(A_ )
if not is_final_block:
__lowerCAmelCase : Any = nn.Conv(
A_ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(A_ )
__lowerCAmelCase : int = down_blocks
__lowerCAmelCase : Optional[int] = controlnet_down_blocks
# mid
__lowerCAmelCase : List[str] = block_out_channels[-1]
__lowerCAmelCase : Any = FlaxUNetMidBlockaDCrossAttn(
in_channels=A_ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
__lowerCAmelCase : Any = nn.Conv(
A_ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , A_ , A_ , A_ , A_ , A_ = 1.0 , A_ = True , A_ = False , ) ->Union[FlaxControlNetOutput, Tuple]:
'''simple docstring'''
__lowerCAmelCase : int = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
__lowerCAmelCase : List[Any] = jnp.flip(A_ , axis=1 )
# 1. time
if not isinstance(A_ , jnp.ndarray ):
__lowerCAmelCase : List[str] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(A_ , jnp.ndarray ) and len(timesteps.shape ) == 0:
__lowerCAmelCase : Optional[int] = timesteps.astype(dtype=jnp.floataa )
__lowerCAmelCase : Optional[Any] = jnp.expand_dims(A_ , 0 )
__lowerCAmelCase : Optional[int] = self.time_proj(A_ )
__lowerCAmelCase : str = self.time_embedding(A_ )
# 2. pre-process
__lowerCAmelCase : List[str] = jnp.transpose(A_ , (0, 2, 3, 1) )
__lowerCAmelCase : Union[str, Any] = self.conv_in(A_ )
__lowerCAmelCase : Tuple = jnp.transpose(A_ , (0, 2, 3, 1) )
__lowerCAmelCase : int = self.controlnet_cond_embedding(A_ )
sample += controlnet_cond
# 3. down
__lowerCAmelCase : Tuple = (sample,)
for down_block in self.down_blocks:
if isinstance(A_ , A_ ):
__lowerCAmelCase, __lowerCAmelCase : Dict = down_block(A_ , A_ , A_ , deterministic=not train )
else:
__lowerCAmelCase, __lowerCAmelCase : Optional[Any] = down_block(A_ , A_ , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
__lowerCAmelCase : Union[str, Any] = self.mid_block(A_ , A_ , A_ , deterministic=not train )
# 5. contronet blocks
__lowerCAmelCase : Union[str, Any] = ()
for down_block_res_sample, controlnet_block in zip(A_ , self.controlnet_down_blocks ):
__lowerCAmelCase : List[Any] = controlnet_block(A_ )
controlnet_down_block_res_samples += (down_block_res_sample,)
__lowerCAmelCase : Dict = controlnet_down_block_res_samples
__lowerCAmelCase : int = self.controlnet_mid_block(A_ )
# 6. scaling
__lowerCAmelCase : Tuple = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=A_ , mid_block_res_sample=A_ )
| 492
| 0
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase = {
"""vocab_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-german-cased""": (
"""https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
),
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase = {
"""distilbert-base-uncased""": 5_12,
"""distilbert-base-uncased-distilled-squad""": 5_12,
"""distilbert-base-cased""": 5_12,
"""distilbert-base-cased-distilled-squad""": 5_12,
"""distilbert-base-german-cased""": 5_12,
"""distilbert-base-multilingual-cased""": 5_12,
}
lowerCAmelCase = {
"""distilbert-base-uncased""": {"""do_lower_case""": True},
"""distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
"""distilbert-base-cased""": {"""do_lower_case""": False},
"""distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
"""distilbert-base-german-cased""": {"""do_lower_case""": False},
"""distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class lowerCamelCase ( _A ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = PRETRAINED_INIT_CONFIGURATION
snake_case_ = ["input_ids", "attention_mask"]
snake_case_ = DistilBertTokenizer
def __init__( self , a_=None , a_=None , a_=True , a_="[UNK]" , a_="[SEP]" , a_="[PAD]" , a_="[CLS]" , a_="[MASK]" , a_=True , a_=None , **a_ , ):
super().__init__(
a_ , tokenizer_file=a_ , do_lower_case=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , tokenize_chinese_chars=a_ , strip_accents=a_ , **a_ , )
lowerCAmelCase : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , a_ ) != do_lower_case
or normalizer_state.get("strip_accents" , a_ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , a_ ) != tokenize_chinese_chars
):
lowerCAmelCase : str = getattr(a_ , normalizer_state.pop("type" ) )
lowerCAmelCase : Optional[int] = do_lower_case
lowerCAmelCase : Any = strip_accents
lowerCAmelCase : Optional[Any] = tokenize_chinese_chars
lowerCAmelCase : Tuple = normalizer_class(**a_ )
lowerCAmelCase : Union[str, Any] = do_lower_case
def _lowerCamelCase ( self , a_ , a_=None ):
lowerCAmelCase : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowerCamelCase ( self , a_ , a_ = None ):
lowerCAmelCase : Tuple = [self.sep_token_id]
lowerCAmelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCamelCase ( self , a_ , a_ = None ):
lowerCAmelCase : Union[str, Any] = self._tokenizer.model.save(a_ , name=a_ )
return tuple(a_ )
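# A minimal usage sketch (the class above corresponds to DistilBertTokenizerFast; the
# input sentence is a made-up example):
#
#   tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   enc = tok("Hello world!", return_tensors="pt")
#   # enc["input_ids"] holds the [CLS] ... [SEP] token ids, enc["attention_mask"] the mask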
| 551
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCamelCase :
def __init__( self , a_ , a_=2 , a_=True , a_=False , a_=10 , a_=3 , a_=32 * 8 , a_=32 * 8 , a_=4 , a_=64 , ):
lowerCAmelCase : Optional[int] = parent
lowerCAmelCase : Optional[Any] = batch_size
lowerCAmelCase : str = is_training
lowerCAmelCase : Any = use_auxiliary_loss
lowerCAmelCase : Dict = num_queries
lowerCAmelCase : Dict = num_channels
lowerCAmelCase : str = min_size
lowerCAmelCase : Any = max_size
lowerCAmelCase : int = num_labels
lowerCAmelCase : Tuple = hidden_dim
lowerCAmelCase : Optional[Any] = hidden_dim
def _lowerCamelCase ( self ):
lowerCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
a_ )
lowerCAmelCase : Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=a_ )
lowerCAmelCase : str = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=a_ ) > 0.5
).float()
lowerCAmelCase : Union[str, Any] = (torch.rand((self.batch_size, self.num_labels) , device=a_ ) > 0.5).long()
lowerCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _lowerCamelCase ( self ):
lowerCAmelCase : Optional[Any] = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
lowerCAmelCase : Optional[Any] = self.num_queries
lowerCAmelCase : Optional[Any] = self.num_labels
lowerCAmelCase : str = [1, 1, 1, 1]
lowerCAmelCase : Tuple = self.num_channels
lowerCAmelCase : str = 64
lowerCAmelCase : Any = 128
lowerCAmelCase : List[str] = self.hidden_dim
lowerCAmelCase : Optional[Any] = self.hidden_dim
lowerCAmelCase : int = self.hidden_dim
return config
def _lowerCamelCase ( self ):
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = self.prepare_config_and_inputs()
lowerCAmelCase : Optional[int] = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def _lowerCamelCase ( self , a_ , a_ ):
lowerCAmelCase : str = output.encoder_hidden_states
lowerCAmelCase : List[str] = output.pixel_decoder_hidden_states
lowerCAmelCase : Any = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(a_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(a_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(a_ ) , config.decoder_layers )
def _lowerCamelCase ( self , a_ , a_ , a_ , a_=False ):
with torch.no_grad():
lowerCAmelCase : Tuple = MaskaFormerModel(config=a_ )
model.to(a_ )
model.eval()
lowerCAmelCase : Tuple = model(pixel_values=a_ , pixel_mask=a_ )
lowerCAmelCase : str = model(a_ , output_hidden_states=a_ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(a_ , a_ )
def _lowerCamelCase ( self , a_ , a_ , a_ , a_ , a_ ):
lowerCAmelCase : Union[str, Any] = MaskaFormerForUniversalSegmentation(config=a_ )
model.to(a_ )
model.eval()
def comm_check_on_output(a_ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowerCAmelCase : List[str] = model(pixel_values=a_ , pixel_mask=a_ )
lowerCAmelCase : Union[str, Any] = model(a_ )
comm_check_on_output(a_ )
lowerCAmelCase : str = model(
pixel_values=a_ , pixel_mask=a_ , mask_labels=a_ , class_labels=a_ )
comm_check_on_output(a_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCamelCase ( _A , _A , unittest.TestCase ):
snake_case_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
snake_case_ = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def _lowerCamelCase ( self ):
lowerCAmelCase : List[Any] = MaskaFormerModelTester(self )
lowerCAmelCase : Tuple = ConfigTester(self , config_class=a_ , has_text_modality=a_ )
def _lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ):
lowerCAmelCase , lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(a_ , **a_ , output_hidden_states=a_ )
def _lowerCamelCase ( self ):
lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*a_ )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def _lowerCamelCase ( self ):
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def _lowerCamelCase ( self ):
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def _lowerCamelCase ( self ):
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def _lowerCamelCase ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def _lowerCamelCase ( self ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowerCamelCase ( self ):
pass
def _lowerCamelCase ( self ):
lowerCAmelCase , lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Dict = model_class(a_ )
lowerCAmelCase : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : List[str] = [*signature.parameters.keys()]
lowerCAmelCase : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a_ )
@slow
def _lowerCamelCase ( self ):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
lowerCAmelCase : int = MaskaFormerModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def _lowerCamelCase ( self ):
lowerCAmelCase : List[Any] = (self.model_tester.min_size,) * 2
lowerCAmelCase : Union[str, Any] = {
"pixel_values": torch.randn((2, 3, *size) , device=a_ ),
"mask_labels": torch.randn((2, 10, *size) , device=a_ ),
"class_labels": torch.zeros(2 , 10 , device=a_ ).long(),
}
lowerCAmelCase : Optional[Any] = self.model_tester.get_config()
lowerCAmelCase : List[Any] = MaskaFormerForUniversalSegmentation(a_ ).to(a_ )
lowerCAmelCase : Dict = model(**a_ )
self.assertTrue(outputs.loss is not None )
def _lowerCamelCase ( self ):
lowerCAmelCase , lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(a_ , **a_ , output_hidden_states=a_ )
def _lowerCamelCase ( self ):
lowerCAmelCase , lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Optional[int] = model_class(a_ ).to(a_ )
lowerCAmelCase : Union[str, Any] = model(**a_ , output_attentions=a_ )
self.assertTrue(outputs.attentions is not None )
def _lowerCamelCase ( self ):
if not self.model_tester.is_training:
return
lowerCAmelCase : List[Any] = self.all_model_classes[1]
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase : List[Any] = model_class(a_ )
model.to(a_ )
model.train()
lowerCAmelCase : Union[str, Any] = model(a_ , mask_labels=a_ , class_labels=a_ ).loss
loss.backward()
def _lowerCamelCase ( self ):
lowerCAmelCase : Optional[int] = self.all_model_classes[1]
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase : Tuple = True
lowerCAmelCase : int = True
lowerCAmelCase : int = model_class(a_ ).to(a_ )
model.train()
lowerCAmelCase : Any = model(a_ , mask_labels=a_ , class_labels=a_ )
lowerCAmelCase : List[Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowerCAmelCase : Optional[int] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
lowerCAmelCase : List[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowerCAmelCase : Tuple = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=a_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCAmelCase = 1e-4
def __A ( ):
lowerCAmelCase : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class lowerCamelCase ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self ):
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _lowerCamelCase ( self ):
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _lowerCamelCase ( self ):
lowerCAmelCase : int = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(a_ )
lowerCAmelCase : Optional[int] = self.default_image_processor
lowerCAmelCase : Dict = prepare_img()
lowerCAmelCase : Any = image_processor(a_ , return_tensors="pt" ).to(a_ )
lowerCAmelCase : str = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(a_ , (1, 3, 384, 384) )
with torch.no_grad():
lowerCAmelCase : Dict = model(**a_ )
lowerCAmelCase : Union[str, Any] = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(a_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , a_ , atol=a_ ) )
lowerCAmelCase : Optional[Any] = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(a_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , a_ , atol=a_ ) )
lowerCAmelCase : Optional[int] = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(a_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , a_ , atol=a_ ) )
def _lowerCamelCase ( self ):
lowerCAmelCase : Any = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(a_ ).eval()
lowerCAmelCase : Union[str, Any] = self.default_image_processor
lowerCAmelCase : Dict = prepare_img()
lowerCAmelCase : Tuple = image_processor(a_ , return_tensors="pt" ).to(a_ )
lowerCAmelCase : Any = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(a_ , (1, 3, 384, 384) )
with torch.no_grad():
lowerCAmelCase : List[str] = model(**a_ )
# masks_queries_logits
lowerCAmelCase : Optional[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
lowerCAmelCase : Tuple = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
lowerCAmelCase : Tuple = torch.tensor(a_ ).to(a_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , a_ , atol=a_ ) )
# class_queries_logits
lowerCAmelCase : str = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase : List[str] = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(a_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , a_ , atol=a_ ) )
def _lowerCamelCase ( self ):
lowerCAmelCase : Tuple = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(a_ ).eval()
lowerCAmelCase : Optional[Any] = self.default_image_processor
lowerCAmelCase : Optional[int] = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , )
lowerCAmelCase : Optional[int] = inputs["pixel_values"].to(a_ )
lowerCAmelCase : int = [el.to(a_ ) for el in inputs["mask_labels"]]
lowerCAmelCase : Union[str, Any] = [el.to(a_ ) for el in inputs["class_labels"]]
with torch.no_grad():
lowerCAmelCase : Optional[Any] = model(**a_ )
self.assertTrue(outputs.loss is not None )
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def convert_tax_checkpoint_to_flax( tax_checkpoint_path, config_name, flax_dump_folder_path ):
"""simple docstring"""
__magic_name__ :str = AutoConfig.from_pretrained(snake_case )
__magic_name__ :Dict = FlaxAutoModelForSeqaSeqLM.from_config(config=snake_case )
__magic_name__ :Any = checkpoints.load_tax_checkpoint(snake_case )
__magic_name__ :List[str] = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
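# T5 v1.1 and LongT5 use a gated activation with two input projections (wi_0/wi_1), while the original T5 has a single wi.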
if config.model_type == "t5":
__magic_name__ :Tuple = '''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
__magic_name__ :Optional[int] = '''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__magic_name__ :Any = '''TransientGlobalSelfAttention'''
else:
raise ValueError(
'''Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with `encoder_attention_type`'''
''' attribute with a value from ['local', 'transient-global'].''' )
# Encoder
for layer_index in range(config.num_layers ):
__magic_name__ :Union[str, Any] = f'''layers_{str(snake_case )}'''
# Self-Attention
__magic_name__ :List[str] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
__magic_name__ :str = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
__magic_name__ :str = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
__magic_name__ :Tuple = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__magic_name__ :List[str] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
__magic_name__ :Any = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
__magic_name__ :Any = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
__magic_name__ :Tuple = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
__magic_name__ :List[str] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
__magic_name__ :List[str] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
__magic_name__ :Union[str, Any] = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
__magic_name__ :Optional[int] = flax_model.params['''encoder''']['''block'''][str(snake_case )]['''layer''']
__magic_name__ :List[Any] = tax_attention_key
__magic_name__ :List[str] = tax_attention_out
__magic_name__ :Optional[int] = tax_attention_query
__magic_name__ :str = tax_attention_value
__magic_name__ :Dict = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__magic_name__ :Any = tax_global_layer_norm
if split_mlp_wi:
__magic_name__ :str = tax_mlp_wi_a
__magic_name__ :Dict = tax_mlp_wi_a
else:
__magic_name__ :Tuple = tax_mlp_wi
__magic_name__ :Optional[int] = tax_mlp_wo
__magic_name__ :Optional[int] = tax_mlp_layer_norm
__magic_name__ :Any = flax_model_encoder_layer_block
# Only for layer 0:
__magic_name__ :Dict = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
__magic_name__ :List[Any] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__magic_name__ :Any = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
__magic_name__ :Dict = tax_encoder_global_rel_embedding
# Assigning
__magic_name__ :Union[str, Any] = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
__magic_name__ :List[str] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
__magic_name__ :List[Any] = f'''layers_{str(snake_case )}'''
# Self-Attention
__magic_name__ :Union[str, Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
__magic_name__ :str = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
__magic_name__ :Union[str, Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
__magic_name__ :Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
__magic_name__ :Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
'''scale'''
]
# Encoder-Decoder-Attention
__magic_name__ :Union[str, Any] = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
__magic_name__ :Tuple = tax_enc_dec_attention_module['''key''']['''kernel''']
__magic_name__ :Optional[int] = tax_enc_dec_attention_module['''out''']['''kernel''']
__magic_name__ :List[str] = tax_enc_dec_attention_module['''query''']['''kernel''']
__magic_name__ :Tuple = tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
__magic_name__ :int = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
__magic_name__ :Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
__magic_name__ :Dict = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
__magic_name__ :int = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
__magic_name__ :Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
__magic_name__ :Dict = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
__magic_name__ :List[str] = flax_model.params['''decoder''']['''block'''][str(snake_case )]['''layer''']
__magic_name__ :Any = tax_attention_key
__magic_name__ :List[str] = tax_attention_out
__magic_name__ :Tuple = tax_attention_query
__magic_name__ :Tuple = tax_attention_value
__magic_name__ :Tuple = tax_pre_attention_layer_norm
__magic_name__ :Optional[Any] = tax_enc_dec_attention_key
__magic_name__ :str = tax_enc_dec_attention_out
__magic_name__ :Union[str, Any] = tax_enc_dec_attention_query
__magic_name__ :Any = tax_enc_dec_attention_value
__magic_name__ :Tuple = tax_cross_layer_norm
if split_mlp_wi:
__magic_name__ :Optional[int] = tax_mlp_wi_a
__magic_name__ :Union[str, Any] = tax_mlp_wi_a
else:
__magic_name__ :Optional[int] = tax_mlp_wi
__magic_name__ :List[str] = tax_mlp_wo
__magic_name__ :int = txa_mlp_layer_norm
__magic_name__ :str = flax_model_decoder_layer_block
# Decoder Normalization
__magic_name__ :Optional[Any] = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
__magic_name__ :Tuple = txa_decoder_norm
# Only for layer 0:
__magic_name__ :Optional[Any] = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
__magic_name__ :str = tax_decoder_rel_embedding
# Token Embeddings
__magic_name__ :List[Any] = tax_model['''target''']['''token_embedder''']['''embedding''']
__magic_name__ :Any = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
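# (those checkpoints do not tie the LM head to the token embeddings, so the head has its own logits_dense kernel)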
if "logits_dense" in tax_model["target"]["decoder"]:
__magic_name__ :int = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
flax_model.save_pretrained(snake_case )
print('''T5X Model was successfully converted!''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path the T5X checkpoint."""
)
parser.add_argument("""--config_name""", default=None, type=str, required=True, help="""Config name of LongT5/T5 model.""")
parser.add_argument(
"""--flax_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output FLAX model."""
)
SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args()
convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
SCREAMING_SNAKE_CASE__ : List[str] = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCamelCase_ ( lowerCamelCase ):
def __init__( self , *__lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Any = eval_examples
__magic_name__ :str = post_process_function
__magic_name__ :int = quant_trainer_args
__magic_name__ :List[str] = 1_2_8 # default number of calibration samples
def A ( self , __lowerCAmelCase=None ):
"""simple docstring"""
if calib_dataset is None and self.calib_dataset is None:
raise ValueError('''Trainer: calibration requires an calib_dataset.''' )
__magic_name__ :Optional[Any] = calib_dataset if calib_dataset is not None else self.calib_dataset
__magic_name__ :Optional[int] = self._remove_unused_columns(__lowerCAmelCase , description='''Calibration''' )
return DataLoader(
__lowerCAmelCase , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__lowerCAmelCase , )
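# Calibration below runs forward passes only, letting pytorch-quantization collect activation statistics before quantization.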
def A ( self , __lowerCAmelCase=None ):
"""simple docstring"""
__magic_name__ :Dict = self.train_dataset if calib_dataset is None else calib_dataset
__magic_name__ :Any = self.get_calib_dataloader(__lowerCAmelCase )
__magic_name__ :List[str] = self.model
quant_trainer.configure_model(__lowerCAmelCase , self.quant_trainer_args , calib=__lowerCAmelCase )
model.eval()
quant_trainer.enable_calibration(__lowerCAmelCase )
logger.info('''***** Running calibration *****''' )
logger.info(F''' Num examples = {self.calib_num}''' )
logger.info(F''' Batch size = {calib_dataloader.batch_size}''' )
for step, inputs in enumerate(__lowerCAmelCase ):
# Prediction step
__magic_name__ , __magic_name__ , __magic_name__ :str = self.prediction_step(__lowerCAmelCase , __lowerCAmelCase , prediction_loss_only=__lowerCAmelCase )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(__lowerCAmelCase , self.quant_trainer_args )
__magic_name__ :Any = model
def A ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase = "eval" ):
"""simple docstring"""
__magic_name__ :Tuple = self.eval_dataset if eval_dataset is None else eval_dataset
__magic_name__ :Optional[Any] = self.get_eval_dataloader(__lowerCAmelCase )
__magic_name__ :str = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__magic_name__ :Any = self.compute_metrics
__magic_name__ :List[Any] = None
__magic_name__ :List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__magic_name__ :Optional[Any] = eval_loop(
__lowerCAmelCase , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , )
finally:
__magic_name__ :Union[str, Any] = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
__magic_name__ :Union[str, Any] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions )
__magic_name__ :int = self.compute_metrics(__lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
__magic_name__ :Dict = metrics.pop(__lowerCAmelCase )
self.log(__lowerCAmelCase )
else:
__magic_name__ :List[str] = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__magic_name__ :Optional[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , __lowerCAmelCase )
return metrics
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase = "test" ):
"""simple docstring"""
__magic_name__ :int = self.get_test_dataloader(__lowerCAmelCase )
# Temporarily disable metric computation, we will do it in the loop here.
__magic_name__ :Dict = self.compute_metrics
__magic_name__ :str = None
__magic_name__ :Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__magic_name__ :int = eval_loop(
__lowerCAmelCase , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , )
finally:
__magic_name__ :List[Any] = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
__magic_name__ :Optional[Any] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions , '''predict''' )
__magic_name__ :Dict = self.compute_metrics(__lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
__magic_name__ :List[str] = metrics.pop(__lowerCAmelCase )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__lowerCAmelCase )
def A ( self , __lowerCAmelCase="./" ):
"""simple docstring"""
__magic_name__ :List[Any] = self.eval_dataset
__magic_name__ :Any = self.get_eval_dataloader(__lowerCAmelCase )
__magic_name__ :int = next(iter(__lowerCAmelCase ) )
# saving device - to make it consistent
__magic_name__ :str = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
__magic_name__ :int = tuple(v.to(__lowerCAmelCase ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
__magic_name__ :Any = True
__magic_name__ :Optional[int] = self.model.to(__lowerCAmelCase )
model.eval()
model.float()
__magic_name__ :Any = model.module if hasattr(__lowerCAmelCase , '''module''' ) else model
quant_trainer.configure_model(__lowerCAmelCase , self.quant_trainer_args )
__magic_name__ :int = os.path.join(__lowerCAmelCase , '''model.onnx''' )
logger.info(F'''exporting model to {output_model_file}''' )
__magic_name__ :Dict = {0: '''batch_size''', 1: '''seq_len'''}
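# Mark batch and sequence-length dimensions as dynamic so the exported ONNX graph accepts variable input shapes.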
torch.onnx.export(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , export_params=__lowerCAmelCase , opset_version=1_3 , do_constant_folding=__lowerCAmelCase , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
} , verbose=__lowerCAmelCase , )
logger.info('''onnx export finished''' )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ : Optional[Any] = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[Any] = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
a_ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure)
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # assumed target of this assignment: silence most TensorFlow log output
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial, given as a sequence of coefficients (lowest degree first), at the point x."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial at x with Horner's method, avoiding explicit exponentiation."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
snake_case = (0.0, 0.0, 5.0, 9.3, 7.0)
snake_case = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
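# For the coefficients and x above, both functions compute
# 5.0 * 10**2 + 9.3 * 10**3 + 7.0 * 10**4 = 79800.0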
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
UpperCAmelCase = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def onnx_export(model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False) -> None:
    # NOTE: the boolean flags passed to torch.onnx.export below are assumed sensible defaults.
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset)
    else:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset)
@torch.no_grad()
def lowerCamelCase (a_ :str , a_ :str , a_ :int , a_ :bool = False) -> Union[str, Any]:
lowercase :Any = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
lowercase :Union[str, Any] = '''cuda'''
elif fpaa and not torch.cuda.is_available():
raise ValueError('''`float16` model export is only supported on GPUs with CUDA''')
else:
lowercase :List[str] = '''cpu'''
lowercase :List[str] = StableDiffusionPipeline.from_pretrained(a_ , torch_dtype=a_).to(a_)
lowercase :List[Any] = Path(a_)
# TEXT ENCODER
lowercase :List[Any] = pipeline.text_encoder.config.max_position_embeddings
lowercase :Dict = pipeline.text_encoder.config.hidden_size
lowercase :Union[str, Any] = pipeline.tokenizer(
'''A sample prompt''' , padding='''max_length''' , max_length=pipeline.tokenizer.model_max_length , truncation=a_ , return_tensors='''pt''' , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=a_ , dtype=torch.intaa)) , output_path=output_path / '''text_encoder''' / '''model.onnx''' , ordered_input_names=['''input_ids'''] , output_names=['''last_hidden_state''', '''pooler_output'''] , dynamic_axes={
'''input_ids''': {0: '''batch''', 1: '''sequence'''},
} , opset=a_ , )
del pipeline.text_encoder
# UNET
lowercase :Any = pipeline.unet.config.in_channels
lowercase :List[Any] = pipeline.unet.config.sample_size
lowercase :Optional[int] = output_path / '''unet''' / '''model.onnx'''
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , a_ , a_ , a_).to(device=a_ , dtype=a_),
torch.randn(2).to(device=a_ , dtype=a_),
torch.randn(2 , a_ , a_).to(device=a_ , dtype=a_),
False,
) , output_path=a_ , ordered_input_names=['''sample''', '''timestep''', '''encoder_hidden_states''', '''return_dict'''] , output_names=['''out_sample'''] , dynamic_axes={
'''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
'''timestep''': {0: '''batch'''},
'''encoder_hidden_states''': {0: '''batch''', 1: '''sequence'''},
} , opset=a_ , use_external_data_format=a_ , )
lowercase :List[Any] = str(unet_path.absolute().as_posix())
lowercase :str = os.path.dirname(a_)
lowercase :Optional[Any] = onnx.load(a_)
# clean up existing tensor files
shutil.rmtree(a_)
os.mkdir(a_)
# collate external tensor files into one
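# (the exported UNet can exceed protobuf's 2 GB single-file limit, hence the external-data export and this re-packing into one weights.pb)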
onnx.save_model(
a_ , a_ , save_as_external_data=a_ , all_tensors_to_one_file=a_ , location='''weights.pb''' , convert_attribute=a_ , )
del pipeline.unet
# VAE ENCODER
lowercase :Tuple = pipeline.vae
lowercase :Optional[Any] = vae_encoder.config.in_channels
lowercase :Any = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
onnx_export(
a_ , model_args=(
torch.randn(1 , a_ , a_ , a_).to(device=a_ , dtype=a_),
False,
) , output_path=output_path / '''vae_encoder''' / '''model.onnx''' , ordered_input_names=['''sample''', '''return_dict'''] , output_names=['''latent_sample'''] , dynamic_axes={
'''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=a_ , )
# VAE DECODER
lowercase :Any = pipeline.vae
lowercase :Dict = vae_decoder.config.latent_channels
lowercase :Union[str, Any] = vae_decoder.config.out_channels
# forward only through the decoder part
vae_decoder.forward = vae_encoder.decode
onnx_export(
a_ , model_args=(
torch.randn(1 , a_ , a_ , a_).to(device=a_ , dtype=a_),
False,
) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={
'''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=a_ , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
lowercase :Dict = pipeline.safety_checker
lowercase :str = safety_checker.config.vision_config.num_channels
lowercase :str = safety_checker.config.vision_config.image_size
lowercase :List[str] = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , a_ , a_ , a_ , ).to(device=a_ , dtype=a_),
torch.randn(1 , a_ , a_ , a_).to(device=a_ , dtype=a_),
) , output_path=output_path / '''safety_checker''' / '''model.onnx''' , ordered_input_names=['''clip_input''', '''images'''] , output_names=['''out_images''', '''has_nsfw_concepts'''] , dynamic_axes={
'''clip_input''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
'''images''': {0: '''batch''', 1: '''height''', 2: '''width''', 3: '''channels'''},
} , opset=a_ , )
del pipeline.safety_checker
lowercase :Tuple = OnnxRuntimeModel.from_pretrained(output_path / '''safety_checker''')
lowercase :Optional[Any] = pipeline.feature_extractor
else:
lowercase :int = None
lowercase :Union[str, Any] = None
lowercase :Optional[int] = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_encoder''') , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_decoder''') , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''text_encoder''') , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / '''unet''') , scheduler=pipeline.scheduler , safety_checker=a_ , feature_extractor=a_ , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(a_)
print('''ONNX pipeline saved to''' , a_)
del pipeline
del onnx_pipeline
lowercase :Tuple = OnnxStableDiffusionPipeline.from_pretrained(a_ , provider='''CPUExecutionProvider''')
print('''ONNX pipeline is loadable''')
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=14,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
UpperCAmelCase = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
def split_string(string: str, separator: str = " ") -> list:
    """Split `string` on `separator` without using str.split (the original function name is not preserved)."""
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
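# Example (using the split_string name chosen above):
#   split_string("apple#banana#cherry#orange", "#") -> ['apple', 'banana', 'cherry', 'orange']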
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
_lowerCamelCase : Dict = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding='utf_8') as f:
        reader = csv.reader(f)
        output = []
        next(reader)  # skip the first line
        for line in tqdm(reader):
            output.append((' '.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
SCREAMING_SNAKE_CASE : Dict = []
for dataset in encoded_datasets:
SCREAMING_SNAKE_CASE : Union[str, Any] = len(__lowerCAmelCase )
SCREAMING_SNAKE_CASE : Optional[int] = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
SCREAMING_SNAKE_CASE : Optional[Any] = np.zeros((n_batch, 2) , dtype=np.intaa )
SCREAMING_SNAKE_CASE : Dict = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa )
SCREAMING_SNAKE_CASE : Dict = np.zeros((n_batch,) , dtype=np.intaa )
for (
i,
(story, conta, conta, mc_label),
) in enumerate(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE : str = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
SCREAMING_SNAKE_CASE : Any = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
SCREAMING_SNAKE_CASE : List[str] = with_conta
SCREAMING_SNAKE_CASE : Optional[Any] = with_conta
SCREAMING_SNAKE_CASE : Dict = len(__lowerCAmelCase ) - 1
SCREAMING_SNAKE_CASE : str = len(__lowerCAmelCase ) - 1
SCREAMING_SNAKE_CASE : Tuple = with_conta
SCREAMING_SNAKE_CASE : str = with_conta
SCREAMING_SNAKE_CASE : Any = mc_label
SCREAMING_SNAKE_CASE : List[str] = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(__lowerCAmelCase ) for t in all_inputs ) )
return tensor_datasets
def main():
SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=__lowerCAmelCase , default='openai-gpt' , help='pretrained model name' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument('--train_dataset' , type=__lowerCAmelCase , default='' )
parser.add_argument('--eval_dataset' , type=__lowerCAmelCase , default='' )
parser.add_argument('--seed' , type=__lowerCAmelCase , default=42 )
parser.add_argument('--num_train_epochs' , type=__lowerCAmelCase , default=3 )
parser.add_argument('--train_batch_size' , type=__lowerCAmelCase , default=8 )
parser.add_argument('--eval_batch_size' , type=__lowerCAmelCase , default=16 )
parser.add_argument('--adam_epsilon' , default=1E-8 , type=__lowerCAmelCase , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , type=__lowerCAmelCase , default=1 )
parser.add_argument(
'--max_steps' , default=-1 , type=__lowerCAmelCase , help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
) , )
parser.add_argument(
'--gradient_accumulation_steps' , type=__lowerCAmelCase , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--learning_rate' , type=__lowerCAmelCase , default=6.25E-5 )
parser.add_argument('--warmup_steps' , default=0 , type=__lowerCAmelCase , help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule' , type=__lowerCAmelCase , default='warmup_linear' )
parser.add_argument('--weight_decay' , type=__lowerCAmelCase , default=0.01 )
parser.add_argument('--lm_coef' , type=__lowerCAmelCase , default=0.9 )
parser.add_argument('--n_valid' , type=__lowerCAmelCase , default=374 )
parser.add_argument('--server_ip' , type=__lowerCAmelCase , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=__lowerCAmelCase , default='' , help='Can be used for distant debugging.' )
SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
print(__lowerCAmelCase )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowerCAmelCase )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
SCREAMING_SNAKE_CASE : Dict = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(__lowerCAmelCase , __lowerCAmelCase ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
SCREAMING_SNAKE_CASE : List[str] = ['_start_', '_delimiter_', '_classify_']
SCREAMING_SNAKE_CASE : int = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(__lowerCAmelCase )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
SCREAMING_SNAKE_CASE : Any = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(__lowerCAmelCase ) )
model.to(__lowerCAmelCase )
# Load and encode the datasets
def tokenize_and_encode(__lowerCAmelCase ):
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(__lowerCAmelCase ) )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
return obj
return [tokenize_and_encode(__lowerCAmelCase ) for o in obj]
logger.info('Encoding dataset...' )
SCREAMING_SNAKE_CASE : str = load_rocstories_dataset(args.train_dataset )
SCREAMING_SNAKE_CASE : str = load_rocstories_dataset(args.eval_dataset )
SCREAMING_SNAKE_CASE : Tuple = (train_dataset, eval_dataset)
SCREAMING_SNAKE_CASE : List[Any] = tokenize_and_encode(__lowerCAmelCase )
# Compute the max input length for the Transformer
SCREAMING_SNAKE_CASE : Dict = model.config.n_positions // 2 - 2
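# Each example packs [start] story [delimiter] continuation [clf], so each text piece is capped at about half the position budget.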
SCREAMING_SNAKE_CASE : int = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
SCREAMING_SNAKE_CASE : str = min(__lowerCAmelCase , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
SCREAMING_SNAKE_CASE : List[str] = pre_process_datasets(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , *__lowerCAmelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = tensor_datasets[0], tensor_datasets[1]
SCREAMING_SNAKE_CASE : str = TensorDataset(*__lowerCAmelCase )
SCREAMING_SNAKE_CASE : Dict = RandomSampler(__lowerCAmelCase )
SCREAMING_SNAKE_CASE : Tuple = DataLoader(__lowerCAmelCase , sampler=__lowerCAmelCase , batch_size=args.train_batch_size )
SCREAMING_SNAKE_CASE : Any = TensorDataset(*__lowerCAmelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = SequentialSampler(__lowerCAmelCase )
SCREAMING_SNAKE_CASE : Tuple = DataLoader(__lowerCAmelCase , sampler=__lowerCAmelCase , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
SCREAMING_SNAKE_CASE : List[str] = args.max_steps
SCREAMING_SNAKE_CASE : Tuple = args.max_steps // (len(__lowerCAmelCase ) // args.gradient_accumulation_steps) + 1
else:
SCREAMING_SNAKE_CASE : Dict = len(__lowerCAmelCase ) // args.gradient_accumulation_steps * args.num_train_epochs
SCREAMING_SNAKE_CASE : Dict = list(model.named_parameters() )
SCREAMING_SNAKE_CASE : int = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
SCREAMING_SNAKE_CASE : Tuple = [
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
SCREAMING_SNAKE_CASE : Optional[int] = AdamW(__lowerCAmelCase , lr=args.learning_rate , eps=args.adam_epsilon )
SCREAMING_SNAKE_CASE : List[Any] = get_linear_schedule_with_warmup(
__lowerCAmelCase , num_warmup_steps=args.warmup_steps , num_training_steps=__lowerCAmelCase )
if args.do_train:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = tqdm(__lowerCAmelCase , desc='Training' )
for step, batch in enumerate(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE : List[str] = tuple(t.to(__lowerCAmelCase ) for t in batch )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = batch
SCREAMING_SNAKE_CASE : Tuple = model(__lowerCAmelCase , mc_token_ids=__lowerCAmelCase , lm_labels=__lowerCAmelCase , mc_labels=__lowerCAmelCase )
SCREAMING_SNAKE_CASE : str = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
SCREAMING_SNAKE_CASE : Optional[Any] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
SCREAMING_SNAKE_CASE : Tuple = 'Training loss: {:.2e} lr: {:.2e}'.format(__lowerCAmelCase , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
SCREAMING_SNAKE_CASE : Union[str, Any] = model.module if hasattr(__lowerCAmelCase , 'module' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(args.output_dir , __lowerCAmelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(args.output_dir , __lowerCAmelCase )
torch.save(model_to_save.state_dict() , __lowerCAmelCase )
model_to_save.config.to_json_file(__lowerCAmelCase )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
SCREAMING_SNAKE_CASE : List[Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
SCREAMING_SNAKE_CASE : List[str] = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(__lowerCAmelCase )
if args.do_eval:
model.eval()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = 0, 0
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = 0, 0
for batch in tqdm(__lowerCAmelCase , desc='Evaluating' ):
SCREAMING_SNAKE_CASE : Optional[int] = tuple(t.to(__lowerCAmelCase ) for t in batch )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = batch
with torch.no_grad():
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = model(
__lowerCAmelCase , mc_token_ids=__lowerCAmelCase , lm_labels=__lowerCAmelCase , mc_labels=__lowerCAmelCase )
SCREAMING_SNAKE_CASE : Any = mc_logits.detach().cpu().numpy()
SCREAMING_SNAKE_CASE : List[Any] = mc_labels.to('cpu' ).numpy()
SCREAMING_SNAKE_CASE : Optional[int] = accuracy(__lowerCAmelCase , __lowerCAmelCase )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
SCREAMING_SNAKE_CASE : Optional[int] = eval_loss / nb_eval_steps
SCREAMING_SNAKE_CASE : str = eval_accuracy / nb_eval_examples
SCREAMING_SNAKE_CASE : Any = tr_loss / nb_tr_steps if args.do_train else None
SCREAMING_SNAKE_CASE : Dict = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(args.output_dir , 'eval_results.txt' )
with open(__lowerCAmelCase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , __lowerCAmelCase , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[Any]=1_3 , SCREAMING_SNAKE_CASE : Union[str, Any]=7 , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Optional[int]=True , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : Dict=9_9 , SCREAMING_SNAKE_CASE : Union[str, Any]=6_4 , SCREAMING_SNAKE_CASE : Any=5 , SCREAMING_SNAKE_CASE : Tuple=4 , SCREAMING_SNAKE_CASE : Union[str, Any]=3_7 , SCREAMING_SNAKE_CASE : Dict="gelu" , SCREAMING_SNAKE_CASE : Dict=0.1 , SCREAMING_SNAKE_CASE : Optional[int]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=5_1_2 , SCREAMING_SNAKE_CASE : Optional[int]=1_6 , SCREAMING_SNAKE_CASE : Dict=2 , SCREAMING_SNAKE_CASE : List[Any]=0.0_2 , SCREAMING_SNAKE_CASE : Optional[Any]=3 , SCREAMING_SNAKE_CASE : str=4 , SCREAMING_SNAKE_CASE : Optional[Any]=None , ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
lowerCAmelCase = vocab_size - 1
def __A ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = self.get_config()
return config, input_ids, input_mask, token_labels
def __A ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def __A ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase = True
return config, input_ids, input_mask, token_labels
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase = GPTNeoXModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )
lowerCAmelCase = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = GPTNeoXModel(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[str] ) -> Dict:
"""simple docstring"""
lowerCAmelCase = GPTNeoXForCausalLM(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self : str , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any] ) -> str:
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = GPTNeoXForQuestionAnswering(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self : str , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[Any] ) -> str:
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = GPTNeoXForSequenceClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int] ) -> Tuple:
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = GPTNeoXForTokenClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = GPTNeoXForCausalLM(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
# first forward pass
lowerCAmelCase = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE )
lowerCAmelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE )
lowerCAmelCase = output_from_no_past["hidden_states"][0]
lowerCAmelCase = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )["hidden_states"][0]
# select random slice
lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 ) )
def __A ( self : Dict ) -> List[str]:
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = config_and_inputs
lowerCAmelCase = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCAmelCase = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def __A ( self : str ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase = GPTNeoXModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=6_4 , num_attention_heads=8 )
def __A ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __A ( self : Optional[Any] ) -> int:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __A ( self : int ) -> int:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __A ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
lowerCAmelCase = None
self.model_tester.create_and_check_model_as_decoder(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __A ( self : List[str] ) -> int:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __A ( self : Optional[int] ) -> Dict:
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*SCREAMING_SNAKE_CASE )
def __A ( self : List[str] ) -> int:
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE )
def __A ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE )
def __A ( self : Dict ) -> Any:
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE )
@unittest.skip(reason="Feed forward chunking is not implemented" )
def __A ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE : str ) -> int:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = ids_tensor([1, 1_0] , config.vocab_size )
lowerCAmelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
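# 1.5x the configured max_position_embeddings, i.e. deliberately longer than the original context window.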
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase = GPTNeoXModel(SCREAMING_SNAKE_CASE )
original_model.to(SCREAMING_SNAKE_CASE )
original_model.eval()
lowerCAmelCase = original_model(SCREAMING_SNAKE_CASE ).last_hidden_state
lowerCAmelCase = original_model(SCREAMING_SNAKE_CASE ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase = {"type": scaling_type, "factor": 1_0.0}
lowerCAmelCase = GPTNeoXModel(SCREAMING_SNAKE_CASE )
scaled_model.to(SCREAMING_SNAKE_CASE )
scaled_model.eval()
lowerCAmelCase = scaled_model(SCREAMING_SNAKE_CASE ).last_hidden_state
lowerCAmelCase = scaled_model(SCREAMING_SNAKE_CASE ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-5 ) )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self : Tuple ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped" )
for checkpointing in [True, False]:
lowerCAmelCase = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(SCREAMING_SNAKE_CASE )
lowerCAmelCase = tokenizer("My favorite food is" , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
lowerCAmelCase = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"
lowerCAmelCase = model.generate(**SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , max_new_tokens=2_0 )
lowerCAmelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE )[0]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , bert_config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file )
    print(f"Building PyTorch model from configuration: {config}" )
    model = BertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowercase : int = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
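# Example invocation of the conversion script above (a sketch; the script file name and the
# paths are placeholders, not taken from this repository):
#
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/bert_model.ckpt \
#       --bert_config_file /path/to/bert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin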
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCAmelCase ( self ):
__UpperCamelCase : List[Any] = 1
__UpperCamelCase : List[str] = 3
__UpperCamelCase : Dict = (3_2, 3_2)
__UpperCamelCase : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_lowerCamelCase )
return image
@property
def lowerCAmelCase ( self ):
torch.manual_seed(0 )
        __UpperCamelCase : Any = UNet2DConditionModel(
block_out_channels=(3_2, 3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=7 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , attention_head_dim=8 , use_linear_projection=_lowerCamelCase , only_cross_attention=(True, True, False) , num_class_embeds=1_0_0 , )
return model
@property
def lowerCAmelCase ( self ):
torch.manual_seed(0 )
__UpperCamelCase : List[str] = AutoencoderKL(
block_out_channels=[3_2, 3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def lowerCAmelCase ( self ):
torch.manual_seed(0 )
__UpperCamelCase : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , )
return CLIPTextModel(_lowerCamelCase )
def lowerCAmelCase ( self ):
__UpperCamelCase : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase : str = self.dummy_cond_unet_upscale
__UpperCamelCase : Union[str, Any] = DDPMScheduler()
__UpperCamelCase : str = DDIMScheduler(prediction_type='v_prediction' )
__UpperCamelCase : Tuple = self.dummy_vae
__UpperCamelCase : Tuple = self.dummy_text_encoder
__UpperCamelCase : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__UpperCamelCase : Dict = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __UpperCamelCase : Any = Image.fromarray(np.uint8(_lowerCamelCase ) ).convert('RGB' ).resize((6_4, 6_4) )
# make sure here that pndm scheduler skips prk
__UpperCamelCase : List[str] = StableDiffusionUpscalePipeline(
unet=_lowerCamelCase , low_res_scheduler=_lowerCamelCase , scheduler=_lowerCamelCase , vae=_lowerCamelCase , text_encoder=_lowerCamelCase , tokenizer=_lowerCamelCase , max_noise_level=3_5_0 , )
__UpperCamelCase : List[Any] = sd_pipe.to(_lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
__UpperCamelCase : List[str] = 'A painting of a squirrel eating a burger'
__UpperCamelCase : int = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__UpperCamelCase : str = sd_pipe(
[prompt] , image=_lowerCamelCase , generator=_lowerCamelCase , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type='np' , )
__UpperCamelCase : Any = output.images
__UpperCamelCase : Union[str, Any] = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__UpperCamelCase : Optional[Any] = sd_pipe(
[prompt] , image=_lowerCamelCase , generator=_lowerCamelCase , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type='np' , return_dict=_lowerCamelCase , )[0]
__UpperCamelCase : List[str] = image[0, -3:, -3:, -1]
__UpperCamelCase : Dict = image_from_tuple[0, -3:, -3:, -1]
__UpperCamelCase : str = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
__UpperCamelCase : Tuple = np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase ( self ):
__UpperCamelCase : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase : Tuple = self.dummy_cond_unet_upscale
__UpperCamelCase : Optional[Any] = DDPMScheduler()
__UpperCamelCase : Dict = DDIMScheduler(prediction_type='v_prediction' )
__UpperCamelCase : int = self.dummy_vae
__UpperCamelCase : List[Any] = self.dummy_text_encoder
__UpperCamelCase : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__UpperCamelCase : List[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __UpperCamelCase : Union[str, Any] = Image.fromarray(np.uint8(_lowerCamelCase ) ).convert('RGB' ).resize((6_4, 6_4) )
# make sure here that pndm scheduler skips prk
__UpperCamelCase : Tuple = StableDiffusionUpscalePipeline(
unet=_lowerCamelCase , low_res_scheduler=_lowerCamelCase , scheduler=_lowerCamelCase , vae=_lowerCamelCase , text_encoder=_lowerCamelCase , tokenizer=_lowerCamelCase , max_noise_level=3_5_0 , )
__UpperCamelCase : List[Any] = sd_pipe.to(_lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
__UpperCamelCase : Optional[Any] = 'A painting of a squirrel eating a burger'
__UpperCamelCase : Optional[Any] = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type='np' , )
__UpperCamelCase : Optional[int] = output.images
assert image.shape[0] == 2
__UpperCamelCase : Optional[Any] = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
__UpperCamelCase : List[Any] = sd_pipe(
[prompt] , image=_lowerCamelCase , generator=_lowerCamelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type='np' , )
__UpperCamelCase : List[str] = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def lowerCAmelCase ( self ):
__UpperCamelCase : int = self.dummy_cond_unet_upscale
__UpperCamelCase : Tuple = DDPMScheduler()
__UpperCamelCase : List[Any] = DDIMScheduler(prediction_type='v_prediction' )
__UpperCamelCase : str = self.dummy_vae
__UpperCamelCase : Union[str, Any] = self.dummy_text_encoder
__UpperCamelCase : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__UpperCamelCase : Union[str, Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __UpperCamelCase : Tuple = Image.fromarray(np.uint8(_lowerCamelCase ) ).convert('RGB' ).resize((6_4, 6_4) )
# put models in fp16, except vae as it overflows in fp16
__UpperCamelCase : Optional[int] = unet.half()
__UpperCamelCase : int = text_encoder.half()
# make sure here that pndm scheduler skips prk
__UpperCamelCase : Optional[Any] = StableDiffusionUpscalePipeline(
unet=_lowerCamelCase , low_res_scheduler=_lowerCamelCase , scheduler=_lowerCamelCase , vae=_lowerCamelCase , text_encoder=_lowerCamelCase , tokenizer=_lowerCamelCase , max_noise_level=3_5_0 , )
__UpperCamelCase : Optional[int] = sd_pipe.to(_lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
__UpperCamelCase : Tuple = 'A painting of a squirrel eating a burger'
__UpperCamelCase : Tuple = torch.manual_seed(0 )
__UpperCamelCase : Tuple = sd_pipe(
[prompt] , image=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=2 , output_type='np' , ).images
__UpperCamelCase : Optional[Any] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self ):
__UpperCamelCase : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
__UpperCamelCase : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat.npy' )
__UpperCamelCase : Union[str, Any] = 'stabilityai/stable-diffusion-x4-upscaler'
__UpperCamelCase : Tuple = StableDiffusionUpscalePipeline.from_pretrained(_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing()
__UpperCamelCase : Optional[Any] = 'a cat sitting on a park bench'
__UpperCamelCase : Union[str, Any] = torch.manual_seed(0 )
__UpperCamelCase : Tuple = pipe(
prompt=_lowerCamelCase , image=_lowerCamelCase , generator=_lowerCamelCase , output_type='np' , )
__UpperCamelCase : List[str] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def lowerCAmelCase ( self ):
__UpperCamelCase : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
__UpperCamelCase : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat_fp16.npy' )
__UpperCamelCase : str = 'stabilityai/stable-diffusion-x4-upscaler'
__UpperCamelCase : Optional[Any] = StableDiffusionUpscalePipeline.from_pretrained(
            _lowerCamelCase , torch_dtype=torch.float16 , )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing()
__UpperCamelCase : Tuple = 'a cat sitting on a park bench'
__UpperCamelCase : Any = torch.manual_seed(0 )
__UpperCamelCase : Union[str, Any] = pipe(
prompt=_lowerCamelCase , image=_lowerCamelCase , generator=_lowerCamelCase , output_type='np' , )
__UpperCamelCase : Tuple = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def lowerCAmelCase ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCamelCase : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
__UpperCamelCase : Optional[Any] = 'stabilityai/stable-diffusion-x4-upscaler'
__UpperCamelCase : Optional[int] = StableDiffusionUpscalePipeline.from_pretrained(
            _lowerCamelCase , torch_dtype=torch.float16 , )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__UpperCamelCase : str = 'a cat sitting on a park bench'
__UpperCamelCase : List[str] = torch.manual_seed(0 )
__UpperCamelCase : List[Any] = pipe(
prompt=_lowerCamelCase , image=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=5 , output_type='np' , )
__UpperCamelCase : str = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 1_0**9
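# Minimal usage sketch of the pipeline exercised by the slow tests above (checkpoint, prompt and
# fp16 setting mirror the tests; the low-resolution PIL image is a placeholder):
#
#   import torch
#   from diffusers import StableDiffusionUpscalePipeline
#
#   pipe = StableDiffusionUpscalePipeline.from_pretrained(
#       "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
#   )
#   pipe = pipe.to("cuda")
#   upscaled = pipe(prompt="a cat sitting on a park bench", image=low_res_image).images[0]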
'''simple docstring'''
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends
if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
a= logging.get_logger(__name__)
class __lowercase ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self , **_lowerCamelCase ):
requires_backends(self , ['bs4'] )
super().__init__(**_lowerCamelCase )
def lowerCAmelCase ( self , _lowerCamelCase ):
__UpperCamelCase : str = []
__UpperCamelCase : Dict = []
__UpperCamelCase : Union[str, Any] = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
__UpperCamelCase : str = parent.find_all(child.name , recursive=_lowerCamelCase )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(_lowerCamelCase ) else next(i for i, s in enumerate(_lowerCamelCase , 1 ) if s is child ) )
__UpperCamelCase : Union[str, Any] = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
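    # Example (hypothetical document): for a text node inside the second <div> of <body>, the
    # method above returns (["html", "body", "div"], [0, 0, 2]): tag names from the root down,
    # plus a 1-based subscript for tags that have same-named siblings (0 means the tag is unique).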
def lowerCAmelCase ( self , _lowerCamelCase ):
__UpperCamelCase : List[str] = BeautifulSoup(_lowerCamelCase , 'html.parser' )
__UpperCamelCase : int = []
__UpperCamelCase : List[Any] = []
__UpperCamelCase : int = []
for element in html_code.descendants:
            if type(element ) == bs4.element.NavigableString:
                if type(element.parent ) != bs4.element.Tag:
continue
__UpperCamelCase : Tuple = html.unescape(_lowerCamelCase ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(_lowerCamelCase )
__UpperCamelCase , __UpperCamelCase : Union[str, Any] = self.xpath_soup(_lowerCamelCase )
stringaxtag_seq.append(_lowerCamelCase )
stringaxsubs_seq.append(_lowerCamelCase )
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError('Number of doc strings and xtags does not correspond' )
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError('Number of doc strings and xsubs does not correspond' )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ):
__UpperCamelCase : Dict = ''
for tagname, subs in zip(_lowerCamelCase , _lowerCamelCase ):
xpath += f"""/{tagname}"""
if subs != 0:
xpath += f"""[{subs}]"""
return xpath
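    # Example: given tag names ["html", "body", "div"] and subscripts [0, 0, 2], the method above
    # returns "/html/body/div[2]" (a subscript of 0 means no same-named siblings, so no index is emitted).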
def __call__( self , _lowerCamelCase ):
__UpperCamelCase : List[Any] = False
# Check that strings has a valid type
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__UpperCamelCase : List[str] = True
elif isinstance(_lowerCamelCase , (list, tuple) ):
if len(_lowerCamelCase ) == 0 or isinstance(html_strings[0] , _lowerCamelCase ):
__UpperCamelCase : Optional[int] = True
if not valid_strings:
raise ValueError(
'HTML strings must of type `str`, `List[str]` (batch of examples), '
f"""but is of type {type(_lowerCamelCase )}.""" )
__UpperCamelCase : Dict = bool(isinstance(_lowerCamelCase , (list, tuple) ) and (isinstance(html_strings[0] , _lowerCamelCase )) )
if not is_batched:
__UpperCamelCase : Tuple = [html_strings]
# Get nodes + xpaths
__UpperCamelCase : Tuple = []
__UpperCamelCase : Tuple = []
for html_string in html_strings:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : str = self.get_three_from_single(_lowerCamelCase )
nodes.append(_lowerCamelCase )
__UpperCamelCase : Union[str, Any] = []
for node, tag_list, sub_list in zip(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
__UpperCamelCase : Optional[Any] = self.construct_xpath(_lowerCamelCase , _lowerCamelCase )
xpath_strings.append(_lowerCamelCase )
xpaths.append(_lowerCamelCase )
# return as Dict
__UpperCamelCase : Optional[int] = {'nodes': nodes, 'xpaths': xpaths}
__UpperCamelCase : Union[str, Any] = BatchFeature(data=_lowerCamelCase , tensor_type=_lowerCamelCase )
return encoded_inputs
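# Minimal usage sketch of the feature extractor defined above (a sketch; the public class name is
# assumed to be MarkupLMFeatureExtractor, which may differ from the name used in this file):
#
#   feature_extractor = MarkupLMFeatureExtractor()
#   encoding = feature_extractor("<html><body><p>Hello world</p></body></html>")
#   print(encoding["nodes"])    # e.g. [['Hello world']]
#   print(encoding["xpaths"])   # e.g. [['/html/body/p']]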
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
_lowercase : str = 2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
_lowercase : Any = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
_lowercase : int = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def lowercase__ ( snake_case_ :str , snake_case_ :str ):
__UpperCAmelCase = len([g for position, g in enumerate(snake_case_ ) if g == main_target[position]] )
return (item, float(snake_case_ ))
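# Example: for item "Helxo" and target "Hello", the fitness function above is intended to return
# ("Helxo", 4.0), since four characters match the target at the same positions.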
def lowercase__ ( snake_case_ :str , snake_case_ :str ):
__UpperCAmelCase = random.randint(0 , len(snake_case_ ) - 1 )
__UpperCAmelCase = parent_a[:random_slice] + parent_a[random_slice:]
__UpperCAmelCase = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def lowercase__ ( snake_case_ :str , snake_case_ :list[str] ):
__UpperCAmelCase = list(snake_case_ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
__UpperCAmelCase = random.choice(snake_case_ )
return "".join(snake_case_ )
def lowercase__ ( snake_case_ :tuple[str, float] , snake_case_ :list[tuple[str, float]] , snake_case_ :list[str] , ):
__UpperCAmelCase = []
# Generate more children proportionally to the fitness score.
__UpperCAmelCase = int(parent_a[1] * 100 ) + 1
__UpperCAmelCase = 10 if child_n >= 10 else child_n
for _ in range(snake_case_ ):
__UpperCAmelCase = population_score[random.randint(0 , snake_case_ )][0]
__UpperCAmelCase , __UpperCAmelCase = crossover(parent_a[0] , snake_case_ )
# Append new string to the population list.
pop.append(mutate(snake_case_ , snake_case_ ) )
pop.append(mutate(snake_case_ , snake_case_ ) )
return pop
def lowercase__ ( snake_case_ :str , snake_case_ :list[str] , snake_case_ :bool = True ):
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
__UpperCAmelCase = F'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(snake_case_ )
# Verify that the target contains no genes besides the ones inside genes variable.
__UpperCAmelCase = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
__UpperCAmelCase = F'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(snake_case_ )
# Generate random starting population.
__UpperCAmelCase = []
for _ in range(snake_case_ ):
population.append(''''''.join([random.choice(snake_case_ ) for i in range(len(snake_case_ ) )] ) )
# Just some logs to know what the algorithms is doing.
__UpperCAmelCase , __UpperCAmelCase = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(snake_case_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
__UpperCAmelCase = [evaluate(snake_case_ , snake_case_ ) for item in population]
# Check if there is a matching evolution.
__UpperCAmelCase = sorted(snake_case_ , key=lambda snake_case_ : x[1] , reverse=snake_case_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
__UpperCAmelCase = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(snake_case_ )
# Normalize population score to be between 0 and 1.
__UpperCAmelCase = [
(item, score / len(snake_case_ )) for item, score in population_score
]
# This is selection
for i in range(snake_case_ ):
population.extend(select(population_score[int(snake_case_ )] , snake_case_ , snake_case_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(snake_case_ ) > N_POPULATION:
break
if __name__ == "__main__":
_lowercase : Optional[int] = (
'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
)
_lowercase : Union[str, Any] = list(
' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
)
_lowercase ,_lowercase ,_lowercase : Optional[int] = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
def __init__(self : Dict , snake_case : Dict , snake_case : List[Any]=13 , snake_case : int=7 , snake_case : List[str]=True , snake_case : List[Any]=True , snake_case : Union[str, Any]=True , snake_case : Dict=True , snake_case : Optional[Any]=99 , snake_case : str=32 , snake_case : Any=2 , snake_case : Optional[Any]=4 , snake_case : int=37 , snake_case : Dict="gelu" , snake_case : Dict=0.1 , snake_case : Dict=0.1 , snake_case : Optional[int]=512 , snake_case : Dict=16 , snake_case : Union[str, Any]=2 , snake_case : Dict=0.02 , snake_case : str=3 , snake_case : str=4 , snake_case : Optional[Any]=None , snake_case : str=0 , ) -> Optional[Any]:
_lowercase : int = parent
_lowercase : Tuple = batch_size
_lowercase : str = seq_length
_lowercase : Optional[Any] = is_training
_lowercase : List[Any] = use_input_mask
_lowercase : Tuple = use_token_type_ids
_lowercase : List[Any] = use_labels
_lowercase : List[str] = vocab_size
_lowercase : Dict = hidden_size
_lowercase : str = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Dict = intermediate_size
_lowercase : List[Any] = hidden_act
_lowercase : Optional[Any] = hidden_dropout_prob
_lowercase : Dict = attention_probs_dropout_prob
_lowercase : int = max_position_embeddings
_lowercase : Tuple = type_vocab_size
_lowercase : List[str] = type_sequence_label_size
_lowercase : str = initializer_range
_lowercase : Tuple = num_labels
_lowercase : List[Any] = num_choices
_lowercase : Dict = scope
_lowercase : Optional[Any] = projection_dim
def _a(self : Tuple ) -> List[str]:
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : Optional[int] = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
_lowercase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : List[Any] = None
if self.use_token_type_ids:
_lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : Tuple = None
_lowercase : Optional[Any] = None
_lowercase : Optional[Any] = None
if self.use_labels:
_lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase : List[str] = ids_tensor([self.batch_size] , self.num_choices )
_lowercase : Union[str, Any] = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , )
_lowercase : Any = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a(self : Union[str, Any] , snake_case : Any , snake_case : Tuple , snake_case : str , snake_case : str , snake_case : Union[str, Any] , snake_case : Tuple , snake_case : str ) -> List[str]:
_lowercase : List[Any] = TFDPRContextEncoder(config=snake_case )
_lowercase : Dict = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
_lowercase : Optional[int] = model(snake_case , token_type_ids=snake_case )
_lowercase : Union[str, Any] = model(snake_case )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def _a(self : List[Any] , snake_case : List[str] , snake_case : Tuple , snake_case : Any , snake_case : int , snake_case : Optional[int] , snake_case : Any , snake_case : Union[str, Any] ) -> int:
_lowercase : Optional[Any] = TFDPRQuestionEncoder(config=snake_case )
_lowercase : int = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
_lowercase : str = model(snake_case , token_type_ids=snake_case )
_lowercase : str = model(snake_case )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def _a(self : Union[str, Any] , snake_case : Any , snake_case : int , snake_case : List[str] , snake_case : Union[str, Any] , snake_case : Tuple , snake_case : Optional[Any] , snake_case : Union[str, Any] ) -> Any:
_lowercase : Any = TFDPRReader(config=snake_case )
_lowercase : Optional[Any] = model(snake_case , attention_mask=snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def _a(self : int ) -> Optional[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_A = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
_A = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
_A = False
_A = False
_A = False
_A = False
_A = False
def _a(self : str ) -> List[str]:
        self.model_tester = TFDPRModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DPRConfig , hidden_size=37 )
def _a(self : str ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _a(self : Any ) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs )
    def _a(self : Dict ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs )
    def _a(self : List[Any] ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs )
@slow
def _a(self : int ) -> str:
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : str = TFDPRContextEncoder.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : List[str] = TFDPRContextEncoder.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : int = TFDPRQuestionEncoder.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Any = TFDPRReader.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_tf
class __lowercase ( unittest.TestCase ):
@slow
def _a(self : Dict ) -> Any:
_lowercase : Any = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base" )
_lowercase : Optional[int] = tf.constant(
[[101, 7592, 1010, 2003, 2026, 3899, 1_0140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
_lowercase : List[Any] = model(snake_case )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
_lowercase : Optional[Any] = tf.constant(
[
[
0.03_23_62_53,
0.12_75_33_35,
0.16_81_85_09,
0.00_27_97_86,
0.3_89_69_33,
0.24_26_49_45,
0.2_17_89_71,
-0.02_33_52_27,
-0.08_48_19_59,
-0.14_32_41_17,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4 ) )
"""simple docstring"""
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCAmelCase__ = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'''
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Any = _ask_options(
'''In which compute environment are you running?''', ['''This machine''', '''AWS (Amazon SageMaker)'''], _convert_compute_environment, )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
_lowerCamelCase : str = get_sagemaker_input()
else:
_lowerCamelCase : List[Any] = get_cluster_input()
return config
def snake_case_ ( A_ : Union[str, Any]=None ):
'''simple docstring'''
if subparsers is not None:
_lowerCamelCase : Tuple = subparsers.add_parser('''config''', description=A_ )
else:
_lowerCamelCase : str = argparse.ArgumentParser('''Accelerate config command''', description=A_ )
parser.add_argument(
'''--config_file''', default=A_, help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
), )
if subparsers is not None:
parser.set_defaults(func=A_ )
return parser
def snake_case_ ( A_ : str ):
'''simple docstring'''
_lowerCamelCase : int = get_user_input()
if args.config_file is not None:
_lowerCamelCase : Dict = args.config_file
else:
if not os.path.isdir(A_ ):
os.makedirs(A_ )
_lowerCamelCase : Optional[int] = default_yaml_config_file
if config_file.endswith('''.json''' ):
config.to_json_file(A_ )
else:
config.to_yaml_file(A_ )
print(F'''accelerate configuration saved at {config_file}''' )
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = config_command_parser()
_lowerCamelCase : Any = parser.parse_args()
config_command(A_ )
if __name__ == "__main__":
main()
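# Typical usage of this entry point (a sketch; the exact interactive prompts depend on the
# installed Accelerate version):
#
#   accelerate config                                  # answer the prompts, save default_config.yaml
#   accelerate config --config_file my_config.yaml     # write the answers to a custom location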
"""simple docstring"""
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = '''src/transformers'''
# Matches is_xxx_available()
_re_backend = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_re_try = re.compile(R'''^\s*try:''')
# Catches a line with else:
_re_else = re.compile(R'''^\s*else:''')
def find_backend( A_ ):
    '''simple docstring'''
    if _re_test_backend.search(A_ ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(A_ )]
    backends.sort()
    return "_and_".join(backends )
def parse_init( A_ ):
'''simple docstring'''
with open(A_, '''r''', encoding='''utf-8''', newline='''\n''' ) as f:
_lowerCamelCase : List[str] = f.readlines()
_lowerCamelCase : Dict = 0
while line_index < len(A_ ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(A_ ):
return None
# First grab the objects without a specific backend in _import_structure
_lowerCamelCase : Union[str, Any] = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
_lowerCamelCase : List[str] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(A_ ):
_lowerCamelCase : Union[str, Any] = _re_one_line_import_struct.search(A_ ).groups()[0]
_lowerCamelCase : List[str] = re.findall(R'''\[([^\]]+)\]''', A_ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
_lowerCamelCase : List[Any] = _re_import_struct_key_value.search(A_ )
if single_line_import_search is not None:
_lowerCamelCase : Dict = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(A_ ) > 0]
objects.extend(A_ )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
_lowerCamelCase : Optional[int] = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
_lowerCamelCase : Dict = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_lowerCamelCase : Optional[int] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_lowerCamelCase : List[str] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
_lowerCamelCase : List[str] = lines[line_index]
if _re_import_struct_add_one.search(A_ ) is not None:
objects.append(_re_import_struct_add_one.search(A_ ).groups()[0] )
elif _re_import_struct_add_many.search(A_ ) is not None:
_lowerCamelCase : Optional[Any] = _re_import_struct_add_many.search(A_ ).groups()[0].split(''', ''' )
_lowerCamelCase : Optional[int] = [obj[1:-1] for obj in imports if len(A_ ) > 0]
objects.extend(A_ )
elif _re_between_brackets.search(A_ ) is not None:
_lowerCamelCase : Union[str, Any] = _re_between_brackets.search(A_ ).groups()[0].split(''', ''' )
_lowerCamelCase : Optional[int] = [obj[1:-1] for obj in imports if len(A_ ) > 0]
objects.extend(A_ )
elif _re_quote_object.search(A_ ) is not None:
objects.append(_re_quote_object.search(A_ ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
_lowerCamelCase : Dict = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_lowerCamelCase : Optional[int] = []
while (
line_index < len(A_ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
_lowerCamelCase : List[str] = lines[line_index]
_lowerCamelCase : List[str] = _re_import.search(A_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
_lowerCamelCase : Optional[Any] = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(A_ ):
# If the line is an if is_backend_available, we grab all objects associated.
_lowerCamelCase : List[Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_lowerCamelCase : Dict = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_lowerCamelCase : Optional[Any] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
_lowerCamelCase : int = lines[line_index]
_lowerCamelCase : Any = _re_import.search(A_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
_lowerCamelCase : int = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def analyze_results( import_dict_objects, type_hint_objects ):
    '''simple docstring'''
    def find_duplicates(A_ ):
        return [k for k, v in collections.Counter(A_ ).items() if v > 1]
    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = '''base imports''' if key == '''none''' else F'''{key} backend'''
            errors.append(F'''Differences for {name}:''' )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' )
    return errors
def check_all_inits( ):
    '''simple docstring'''
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            fname = os.path.join(root, '''__init__.py''' )
            objects = parse_init(fname )
            if objects is not None:
                errors = analyze_results(*objects )
                if len(errors ) > 0:
                    errors[0] = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append('''\n'''.join(errors ) )
    if len(failures ) > 0:
        raise ValueError('''\n\n'''.join(failures ) )
def get_transformers_submodules( ):
    '''simple docstring'''
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('''_''' ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob('''*.py''' ) ) ) == 0:
                continue
            short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(os.path.sep, '''.''' )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace('''.py''', '''''' ).replace(os.path.sep, '''.''' )
            if len(submodule.split('''.''' ) ) == 1:
                submodules.append(submodule )
    return submodules
IGNORE_SUBMODULES = [
    '''convert_pytorch_checkpoint_to_tf2''',
    '''modeling_flax_pytorch_utils''',
    '''models.esm.openfold_utils''',
]
def check_submodules( ):
    '''simple docstring'''
    from transformers.utils import direct_transformers_import
    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS )
    import_structure_keys = set(transformers._import_structure.keys() )
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, '''__init__.py''' ), '''r''' ) as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''', init_content ) ) )
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered ) > 0:
        list_of_modules = '''\n'''.join(F'''- {module}''' for module in module_not_registered )
        raise ValueError(
            '''The following submodules are not properly registered in the main init of Transformers:\n'''
            F'''{list_of_modules}\n'''
            '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase_ ( __snake_case , unittest.TestCase ):
_UpperCamelCase : Any = DiTPipeline
_UpperCamelCase : int = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCamelCase : Dict = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCamelCase : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCamelCase : Union[str, Any] = False
def __a ( self ):
torch.manual_seed(0 )
        _lowercase : str = Transformer2DModel(
sample_size=1_6 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_lowerCAmelCase , activation_fn='gelu-approximate' , num_embeds_ada_norm=1_0_0_0 , norm_type='ada_norm_zero' , norm_elementwise_affine=_lowerCAmelCase , )
_lowercase : int = AutoencoderKL()
_lowercase : List[str] = DDIMScheduler()
_lowercase : Optional[int] = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
if str(_lowerCAmelCase ).startswith('mps' ):
_lowercase : List[str] = torch.manual_seed(_lowerCAmelCase )
else:
_lowercase : Any = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
_lowercase : Union[str, Any] = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __a ( self ):
_lowercase : Dict = 'cpu'
_lowercase : List[Any] = self.get_dummy_components()
_lowercase : List[str] = self.pipeline_class(**_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowercase : Union[str, Any] = self.get_dummy_inputs(_lowerCAmelCase )
_lowercase : Dict = pipe(**_lowerCAmelCase ).images
_lowercase : List[Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 1_6, 1_6, 3) )
_lowercase : Optional[int] = np.array([0.29_46, 0.66_01, 0.43_29, 0.32_96, 0.41_44, 0.53_19, 0.72_73, 0.50_13, 0.44_57] )
_lowercase : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowerCAmelCase , 1E-3 )
def __a ( self ):
self._test_inference_batch_single_identical(relax_max_difference=_lowerCAmelCase , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __a ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self ):
_lowercase : List[Any] = torch.manual_seed(0 )
_lowercase : Optional[Any] = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
_lowercase : List[Any] = ['vase', 'umbrella', 'white shark', 'white wolf']
_lowercase : List[str] = pipe.get_label_ids(_lowerCAmelCase )
_lowercase : Tuple = pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=4_0 , output_type='np' ).images
for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = load_numpy(
F"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-2
def __a ( self ):
_lowercase : str = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
_lowercase : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
_lowercase : Optional[Any] = ['vase', 'umbrella']
_lowercase : List[Any] = pipe.get_label_ids(_lowerCAmelCase )
_lowercase : List[str] = torch.manual_seed(0 )
_lowercase : List[Any] = pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=2_5 , output_type='np' ).images
for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
F"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-1
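# Minimal usage sketch of the pipeline exercised above (checkpoint, labels and scheduler swap
# mirror the slow tests; generation settings are illustrative):
#
#   import torch
#   from diffusers import DiTPipeline, DPMSolverMultistepScheduler
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
#   pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
#   pipe = pipe.to("cuda")
#   class_ids = pipe.get_label_ids(["white shark"])
#   image = pipe(class_ids, generator=torch.manual_seed(0), num_inference_steps=25, output_type="np").images[0]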
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
snake_case__ : List[Any] = BeautifulSoup(requests.get(__SCREAMING_SNAKE_CASE , params=__SCREAMING_SNAKE_CASE ).content , 'html.parser' )
snake_case__ : Optional[int] = soup.find('div' , attrs={'class': 'gs_ri'} )
snake_case__ : Dict = div.find('div' , attrs={'class': 'gs_fl'} ).find_all('a' )
return anchors[2].get_text()
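# Note: anchors[2] in the Google Scholar result footer is typically the "Cited by N" link, so the
# function above returns the citation count text (this depends on the page layout, which may change).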
if __name__ == "__main__":
A_ = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
UpperCamelCase = get_tests_dir("""fixtures/dummy-config.json""")
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self )->Tuple:
'''simple docstring'''
A_ : str = 0
def _snake_case ( self )->int:
'''simple docstring'''
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec('''transformers.models.auto''' ) )
def _snake_case ( self )->Union[str, Any]:
'''simple docstring'''
A_ : Union[str, Any] = AutoConfig.from_pretrained('''bert-base-uncased''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
A_ : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )->Tuple:
'''simple docstring'''
A_ : str = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )->List[Any]:
'''simple docstring'''
A_ : str = AutoConfig.for_model('''roberta''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )->List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
A_ : str = os.path.join(_SCREAMING_SNAKE_CASE , '''fake-roberta''' )
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
with open(os.path.join(_SCREAMING_SNAKE_CASE , '''config.json''' ) , '''w''' ) as f:
f.write(json.dumps({} ) )
A_ : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertEqual(type(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )->Any:
'''simple docstring'''
try:
AutoConfig.register('''custom''' , _SCREAMING_SNAKE_CASE )
# Wrong model type will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
AutoConfig.register('''model''' , _SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
AutoConfig.register('''bert''' , _SCREAMING_SNAKE_CASE )
# Now that the config is registered, it can be used as any other config with the auto-API
A_ : int = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_SCREAMING_SNAKE_CASE )
A_ : str = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def _snake_case ( self )->Tuple:
'''simple docstring'''
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , '''bert-base is not a local folder and is not a valid model identifier''' ):
A_ : Any = AutoConfig.from_pretrained('''bert-base''' )
def _snake_case ( self )->List[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
A_ : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE , revision='''aaaaaa''' )
def _snake_case ( self )->List[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , '''hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.''' , ):
A_ : int = AutoConfig.from_pretrained('''hf-internal-testing/no-config-test-repo''' )
def _snake_case ( self )->int:
'''simple docstring'''
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ : Dict = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ : str = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_SCREAMING_SNAKE_CASE )
A_ : str = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_SCREAMING_SNAKE_CASE )
A_ : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE , trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(reloaded_config.__class__.__name__ , '''NewModelConfig''' )
def _snake_case ( self )->Union[str, Any]:
'''simple docstring'''
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
snake_case = """new-model"""
try:
AutoConfig.register('''new-model''' , _SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local
A_ : Union[str, Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' )
# If remote code is disabled, we load the local one.
A_ : Tuple = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' )
# If remote is enabled, we load from the Hub
A_ : List[Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
UpperCamelCase = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
UpperCamelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
UpperCamelCase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
snake_case : Tuple = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-classification/requirements.txt""")
snake_case : List[str] = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
snake_case : int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def A ( __snake_case: str ) -> Tuple:
"""simple docstring"""
with open(__snake_case , 'rb' ) as f:
__magic_name__ = Image.open(__snake_case )
return im.convert('RGB' )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None , metadata={
            'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
        } , )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    train_dir: Optional[str] = field(default=None , metadata={'help': 'A folder containing the training data.'})
    validation_dir: Optional[str] = field(default=None , metadata={'help': 'A folder containing the validation data.'})
    train_val_split: Optional[float] = field(
        default=0.15 , metadata={'help': 'Percent to split off of train for validation.'})
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )

    def __post_init__(self ):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                'You must specify either a dataset name from the hub or a train and/or validation directory.' )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
    model_type: Optional[str] = field(
        default=None , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)} , )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'})
    model_revision: str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    image_processor_name: str = field(default=None , metadata={'help': 'Name or path of preprocessor config.'})
    use_auth_token: bool = field(
        default=False , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def collate_fn(examples ):
    """Stack pixel values and labels from a list of dataset examples into a batch."""
    pixel_values = torch.stack([example['pixel_values'] for example in examples] )
    labels = torch.tensor([example['labels'] for example in examples] )
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_image_classification' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='image-classification' , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files['train'] = os.path.join(data_args.train_dir , '**' )
        if data_args.validation_dir is not None:
            data_files['validation'] = os.path.join(data_args.validation_dir , '**' )
        dataset = load_dataset(
            'imagefolder' , data_files=data_files , cache_dir=model_args.cache_dir , task='image-classification' , )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if 'validation' in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = dataset['train'].train_test_split(data_args.train_val_split )
        dataset['train'] = split['train']
        dataset['validation'] = split['test']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset['train'].features['labels'].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels ):
        label2id[label] = str(i )
        id2label[str(i )] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task='image-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
        size = image_processor.size['shortest_edge']
    else:
        size = (image_processor.size['height'], image_processor.size['width'])
    normalize = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
    _train_transforms = Compose(
        [
            RandomResizedCrop(size ),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ] )
    _val_transforms = Compose(
        [
            Resize(size ),
            CenterCrop(size ),
            ToTensor(),
            normalize,
        ] )
    def train_transforms(example_batch ):
        example_batch['pixel_values'] = [
            _train_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']
        ]
        return example_batch
    def val_transforms(example_batch ):
        example_batch['pixel_values'] = [_val_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']]
        return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
            dataset['train'] = (
                dataset['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
            dataset['validation'] = (
                dataset['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms )
    # Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=dataset['train'] if training_args.do_train else None , eval_dataset=dataset['validation'] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        trainer.log_metrics('train' , train_result.metrics )
        trainer.save_metrics('train' , train_result.metrics )
        trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        'finetuned_from': model_args.model_name_or_path,
        'tasks': 'image-classification',
        'dataset': data_args.dataset_name,
        'tags': ['image-classification', 'vision'],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
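# ---------------------------------------------------------------------------
# Illustrative only (not part of the training script above): a minimal sketch of
# what the torchvision transforms and the collate function produce for a dummy
# batch. The 224x224 size and the mean/std values are assumptions for the demo.
# ---------------------------------------------------------------------------
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor

demo_size = 224
demo_transforms = Compose(
    [
        Resize(demo_size),
        CenterCrop(demo_size),
        ToTensor(),
        Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ]
)

# Two dummy RGB images stand in for dataset examples.
examples = [
    {"pixel_values": demo_transforms(Image.new("RGB", (300, 280))), "labels": 0},
    {"pixel_values": demo_transforms(Image.new("RGB", (260, 320))), "labels": 1},
]

# Same batching logic as collate_fn above.
batch = {
    "pixel_values": torch.stack([example["pixel_values"] for example in examples]),
    "labels": torch.tensor([example["labels"] for example in examples]),
}
print(batch["pixel_values"].shape)  # torch.Size([2, 3, 224, 224])
print(batch["labels"])              # tensor([0, 1])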
| 545
|
"""simple docstring"""
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """Return number + 2 if number and number + 2 are both prime, otherwise -1."""
    if not isinstance(number , int ):
        msg = F"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
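# ---------------------------------------------------------------------------
# Self-contained sketch of the same twin-prime check, using a local trial-division
# primality test instead of maths.prime_check.is_prime (an assumption made so the
# example runs on its own).
# ---------------------------------------------------------------------------
def _is_prime(n: int) -> bool:
    if n < 2:
        return False
    return all(n % d != 0 for d in range(2, int(n**0.5) + 1))


def twin_prime_demo(number: int) -> int:
    """Return number + 2 if number and number + 2 are both prime, else -1."""
    if _is_prime(number) and _is_prime(number + 2):
        return number + 2
    return -1


assert twin_prime_demo(5) == 7
assert twin_prime_demo(8) == -1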
| 545
| 1
|
def validate_initial_digits(credit_card_number: str ) -> bool:
    """Return True if the card number starts with a plausible issuer prefix."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
def luhn_validation(credit_card_number: str ) -> bool:
    """Return True if the card number passes the Luhn checksum."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number ) - 2
    for i in range(half_len , -1 , -2 ):
        # double the value of every second digit
        digit = int(cc_number[i] )
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit ) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number ) - 1 , -1 , -2 ):
        total += int(cc_number[i] )
    return total % 10 == 0
def validate_credit_card_number(credit_card_number: str ) -> bool:
    """Print a diagnostic message and return whether the card number is valid."""
    error_message = F'''{credit_card_number} is an invalid credit card number because'''
    if not credit_card_number.isdigit():
        print(F'''{error_message} it has nonnumerical characters.''' )
        return False
    if not 13 <= len(credit_card_number ) <= 16:
        print(F'''{error_message} of its length.''' )
        return False
    if not validate_initial_digits(credit_card_number ):
        print(F'''{error_message} of its first two digits.''' )
        return False
    if not luhn_validation(credit_card_number ):
        print(F'''{error_message} it fails the Luhn check.''' )
        return False
    print(F'''{credit_card_number} is a valid credit card number.''' )
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
| 703
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
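# ---------------------------------------------------------------------------
# Illustrative only: the kind of pytest option the shared helper above is
# expected to register (the real flags live in diffusers.utils.testing_utils,
# so this standalone hook is an assumption, not the actual implementation).
# ---------------------------------------------------------------------------
def pytest_addoption_demo(parser):
    parser.addoption(
        "--make-reports",
        action="store",
        default=False,
        help="generate report files; the given value is used as the report name prefix",
    )
# A run would then look like: pytest --make-reports=latest_tests tests/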
| 25
| 0
|